ot: offlinetest
offlinetest: codetest
- nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py
+ nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py
tar: youtube-dl.tar.gz
chmod a+x youtube-dl
README.md: youtube_dl/*.py youtube_dl/*/*.py
- COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
+ COLUMNS=80 $(PYTHON) youtube_dl/__main__.py --help | $(PYTHON) devscripts/make_readme.py
CONTRIBUTING.md: README.md
- python devscripts/make_contributing.py README.md CONTRIBUTING.md
+ $(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
supportedsites:
- python devscripts/make_supportedsites.py docs/supportedsites.md
+ $(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
README.txt: README.md
pandoc -f markdown -t plain README.md -o README.txt
youtube-dl.1: README.md
- python devscripts/prepare_manpage.py >youtube-dl.1.temp.md
+ $(PYTHON) devscripts/prepare_manpage.py >youtube-dl.1.temp.md
pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
rm -f youtube-dl.1.temp.md
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
- python devscripts/bash-completion.py
+ $(PYTHON) devscripts/bash-completion.py
bash-completion: youtube-dl.bash-completion
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
- python devscripts/zsh-completion.py
+ $(PYTHON) devscripts/zsh-completion.py
zsh-completion: youtube-dl.zsh
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
- python devscripts/fish-completion.py
+ $(PYTHON) devscripts/fish-completion.py
fish-completion: youtube-dl.fish
sudo pip install youtube-dl
-Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
expected filesize (experimental)
--hls-prefer-native Use the native HLS downloader instead of
ffmpeg (experimental)
+ --hls-use-mpegts Use the mpegts container for HLS videos,
+ allowing to play the video while
+ downloading (some players may not be able
+ to play it)
--external-downloader COMMAND Use the specified external downloader.
Currently supports
aria2c,axel,curl,httpie,wget
--all-formats Download all available video formats
--prefer-free-formats Prefer free video formats unless a specific
one is requested
- -F, --list-formats List all available formats of specified
+ -F, --list-formats List all available formats of requested
videos
--youtube-skip-dash-manifest Do not download the DASH manifests and
related data on YouTube videos
preference, for example: "srt" or
"ass/srt/best"
--sub-lang LANGS Languages of the subtitles to download
- (optional) separated by commas, use IETF
- language tags like 'en,pt'
+ (optional) separated by commas, use --list-
+ subs for available language tags
## Authentication Options:
-u, --username USERNAME Login with this account ID
downloading, similar to find's -exec
syntax. Example: --exec 'adb push {}
/sdcard/Music/ && rm {}'
- --convert-subtitles FORMAT Convert the subtitles to other format
+ --convert-subs FORMAT Convert the subtitles to other format
(currently supported: srt|ass|vtt)
# CONFIGURATION
You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
-### Authentication with `.netrc` file ###
+### Authentication with `.netrc` file
You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on a per-extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
```
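# create the file and restrict permissions so that only you can read/write it
touch $HOME/.netrc
chmod a-rwx,u+rw $HOME/.netrc
```
After that you can add credentials for an extractor in the standard `.netrc` format, one `machine` entry per line, for example (a hypothetical account):
```
machine youtube login myaccount@gmail.com password my_youtube_password
```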
The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
- - `id`: The sequence will be replaced by the video identifier.
- - `url`: The sequence will be replaced by the video URL.
- - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
- - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
- - `title`: The sequence will be replaced by the video title.
- - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- - `playlist`: The sequence will be replaced by the name or the id of the playlist that contains the video.
- - `playlist_index`: The sequence will be replaced by the index of the video in the playlist padded with leading zeros according to the total length of the playlist.
- - `format_id`: The sequence will be replaced by the format code specified by `--format`.
- - `duration`: The sequence will be replaced by the length of the video in seconds.
+ - `id`: Video identifier
+ - `title`: Video title
+ - `url`: Video URL
+ - `ext`: Video filename extension
+ - `alt_title`: A secondary title of the video
+ - `display_id`: An alternative identifier for the video
+ - `uploader`: Full name of the video uploader
+ - `creator`: The main artist who created the video
+ - `release_date`: The date (YYYYMMDD) when the video was released
+ - `timestamp`: UNIX timestamp of the moment the video became available
+ - `upload_date`: Video upload date (YYYYMMDD)
+ - `uploader_id`: Nickname or id of the video uploader
+ - `location`: Physical location where the video was filmed
+ - `duration`: Length of the video in seconds
+ - `view_count`: How many users have watched the video on the platform
+ - `like_count`: Number of positive ratings of the video
+ - `dislike_count`: Number of negative ratings of the video
+ - `repost_count`: Number of reposts of the video
+ - `average_rating`: Average rating given by users; the scale used depends on the webpage
+ - `comment_count`: Number of comments on the video
+ - `age_limit`: Age restriction for the video (years)
+ - `format`: A human-readable description of the format
+ - `format_id`: Format code specified by `--format`
+ - `format_note`: Additional info about the format
+ - `width`: Width of the video
+ - `height`: Height of the video
+ - `resolution`: Textual description of width and height
+ - `tbr`: Average bitrate of audio and video in KBit/s
+ - `abr`: Average audio bitrate in KBit/s
+ - `acodec`: Name of the audio codec in use
+ - `asr`: Audio sampling rate in Hertz
+ - `vbr`: Average video bitrate in KBit/s
+ - `fps`: Frame rate
+ - `vcodec`: Name of the video codec in use
+ - `container`: Name of the container format
+ - `filesize`: The number of bytes, if known in advance
+ - `filesize_approx`: An estimate for the number of bytes
+ - `protocol`: The protocol that will be used for the actual download
+ - `extractor`: Name of the extractor
+ - `extractor_key`: Key name of the extractor
+ - `epoch`: Unix epoch when creating the file
+ - `autonumber`: Five-digit number that will be increased with each download, starting at zero
+ - `playlist`: Name or id of the playlist that contains the video
+ - `playlist_index`: Index of the video in the playlist padded with leading zeros according to the total length of the playlist
+
+Available for the video that belongs to some logical chapter or section:
+ - `chapter`: Name or title of the chapter the video belongs to
+ - `chapter_number`: Number of the chapter the video belongs to
+ - `chapter_id`: Id of the chapter the video belongs to
+
+Available for the video that is an episode of some series or programme:
+ - `series`: Title of the series or programme the video episode belongs to
+ - `season`: Title of the season the video episode belongs to
+ - `season_number`: Number of the season the video episode belongs to
+ - `season_id`: Id of the season the video episode belongs to
+ - `episode`: Title of the video episode
+ - `episode_number`: Number of the video episode within a season
+ - `episode_id`: Id of the video episode
+
+Each of the aforementioned sequences, when referenced in an output template, will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present, since they depend on the metadata obtained by a particular extractor; such sequences will be replaced with `NA`.
+
+For example, for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `youtube-dl test video` and id `BaW_jenozKcj`, this will result in a `youtube-dl test video-BaW_jenozKcj.mp4` file created in the current directory.
+
+The output template can also contain an arbitrary hierarchical path, e.g. `-o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'`, which will result in downloading each video into a directory corresponding to this path template. Any missing directory will be automatically created for you.
+
+To specify a literal percent character in the output template, use `%%`. To output to stdout, use `-o -`.
The current default template is `%(title)s-%(id)s.%(ext)s`.
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
+Examples (note that on Windows you may need to use double quotes instead of single quotes):
+
```bash
-$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+$ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
-$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+
+$ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
+
+# Download YouTube playlist videos in separate directory indexed by video order in a playlist
+$ youtube-dl -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
+
+# Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home
+$ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
+
+# Download entire series season keeping each series and each season in separate directory under C:/MyVideos
+$ youtube-dl -o "C:/MyVideos/%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" http://videomore.ru/kino_v_detalayah/5_sezon/367617
+
+# Stream the video being downloaded to stdout
+$ youtube-dl -o - BaW_jenozKc
```
# FORMAT SELECTION
-By default youtube-dl tries to download the best quality, but sometimes you may want to download in a different format.
-The simplest case is requesting a specific format, for example `-f 22`. You can get the list of available formats using `--list-formats`, you can also use a file extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names `best`, `bestvideo`, `bestaudio` and `worst`.
+By default youtube-dl tries to download the best available quality, i.e. if you want the best quality you **don't need** to pass any special options; youtube-dl will guess it for you by **default**.
+
+But sometimes you may want to download in a different format, for example when you are on a slow or intermittent connection. The key mechanism for achieving this is the so-called *format selection*, with which you can explicitly specify the desired format, select formats based on one or more criteria, set up precedence and much more.
+
+The general syntax for format selection is `--format FORMAT` or, shorter, `-f FORMAT`, where `FORMAT` is a *selector expression*, i.e. an expression that describes the format or formats you would like to download.
+
+The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for a particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
+
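+For instance, with the test video used elsewhere in this README (a minimal sketch; format code 22 is assumed to be among the codes listed):
+
+```bash
+# list all available format codes for the video
+$ youtube-dl -F 'http://www.youtube.com/watch?v=BaW_jenozKc'
+# then download one of them explicitly
+$ youtube-dl -f 22 'http://www.youtube.com/watch?v=BaW_jenozKc'
+```
+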
+You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format with that file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.
+
+You can also use special names to select particular edge-case formats:
+ - `best`: Select best quality format represented by single file with video and audio
+ - `worst`: Select worst quality format represented by single file with video and audio
+ - `bestvideo`: Select best quality video only format (e.g. DASH video), may not be available
+ - `worstvideo`: Select worst quality video only format, may not be available
+ - `bestaudio`: Select best quality audio only format, may not be available
+ - `worstaudio`: Select worst quality audio only format, may not be available
+
+For example, to download the worst quality video-only format you can use `-f worstvideo`.
+
+If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes. Note that slash is left-associative, i.e. formats on the left hand side are preferred, for example `-f 22/17/18` will download format 22 if it's available, otherwise it will download format 17 if it's available, otherwise it will download format 18 if it's available, otherwise it will complain that no suitable formats are available for download.
+
+If you want to download several formats of the same video use a comma as a separator, e.g. `-f 22,17,18` will download all three of these formats if they are available. A more sophisticated example combines this with the precedence feature: `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`.
+
+You can also filter the video formats by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`).
-If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`. Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
+The following numeric meta fields can be used with comparisons `<`, `<=`, `>`, `>=`, `=` (equals), `!=` (not equals):
+ - `filesize`: The number of bytes, if known in advance
+ - `width`: Width of the video, if known
+ - `height`: Height of the video, if known
+ - `tbr`: Average bitrate of audio and video in KBit/s
+ - `abr`: Average audio bitrate in KBit/s
+ - `vbr`: Average video bitrate in KBit/s
+ - `asr`: Audio sampling rate in Hertz
+ - `fps`: Frame rate
-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
+Filtering also works for the comparisons `=` (equals), `!=` (not equals), `^=` (begins with), `$=` (ends with), `*=` (contains) and the following string meta fields:
+ - `ext`: File extension
+ - `acodec`: Name of the audio codec in use
+ - `vcodec`: Name of the video codec in use
+ - `container`: Name of the container format
+ - `protocol`: The protocol that will be used for the actual download, lower-case. `http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `m3u8`, or `m3u8_native`
+
+Note that none of the aforementioned meta fields are guaranteed to be present, since this solely depends on the metadata obtained by a particular extractor, i.e. the metadata offered by the video hoster.
+
+Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s.
+
+You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv installed), for example `-f bestvideo+bestaudio` will download the best video-only format and the best audio-only format, and mux them together with ffmpeg/avconv.
+
+Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
+
+Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
+Examples (note that on Windows you may need to use double quotes instead of single quotes):
+```bash
+# Download best mp4 format available or any other best if no mp4 available
+$ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
+
+# Download best format available but no better than 480p
+$ youtube-dl -f 'bestvideo[height<=480]+bestaudio/best[height<=480]'
+
+# Download best video only format but no bigger than 50 MB
+$ youtube-dl -f 'best[filesize<50M]'
+
+# Download best format available via direct link over HTTP/HTTPS protocol
+$ youtube-dl -f '(bestvideo+bestaudio/best)[protocol^=http]'
+```
+
+
# VIDEO SELECTION
Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`. They accept dates in two formats:
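 - Absolute dates: Dates in the format `YYYYMMDD`.
 - Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`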
YouTube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
-### ERROR: unable to download video ###
+### ERROR: unable to download video
YouTube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
-### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command` ###
+### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command`
That's actually the output from your shell. Since ampersand is one of the special shell characters it's interpreted by the shell preventing you from passing the whole URL to youtube-dl. To disable your shell from interpreting the ampersands (or any other special characters) you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
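For example, either of the following keeps the shell from interpreting the `&` (a sketch; `PLAYLIST_ID` is a placeholder, not a real playlist identifier):

```bash
# quote the whole URL so the shell passes it through verbatim
$ youtube-dl 'http://www.youtube.com/watch?v=BaW_jenozKc&list=PLAYLIST_ID'
# or escape the ampersand itself
$ youtube-dl http://www.youtube.com/watch?v=BaW_jenozKc\&list=PLAYLIST_ID
```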
These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--source-address` options](#network-options) to select another IP address.
-### SyntaxError: Non-ASCII character ###
+### SyntaxError: Non-ASCII character
The error
Use the `-o` to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration).
-### How do I download a video starting with a `-` ?
+### How do I download a video starting with a `-`?
Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`:
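```
youtube-dl -- -wNyEUrxzFU
youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"
```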
Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
-Passing cookies to youtube-dl is a good way to workaround login when a particular extractor does not implement it explicitly.
+Passing cookies to youtube-dl is a good way to work around login when a particular extractor does not implement it explicitly. Another use case is working around [CAPTCHA](https://en.wikipedia.org/wiki/CAPTCHA) some websites require you to solve in particular cases in order to get access (e.g. YouTube, CloudFlare).
### Can you add support for this anime video site, or site which shows current movies for free?
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
-Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L117-L265). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L121-L269). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
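```python
from __future__ import unicode_literals
import youtube_dl


class MyLogger(object):
    def debug(self, msg):
        pass

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)


def my_hook(d):
    if d['status'] == 'finished':
        print('Done downloading, now converting ...')


ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
    'logger': MyLogger(),
    'progress_hooks': [my_hook],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```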
# BUGS
-Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode.
+Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
-**Please include the full output of youtube-dl when run with `-v`**.
+**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** the `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+```
+$ youtube-dl -v <your command line>
+[debug] System config: []
+[debug] User config: []
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
+[debug] youtube-dl version 2015.12.06
+[debug] Git HEAD: 135392e
+[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
+[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
+[debug] Proxy map: {}
+...
+```
+**Do not post screenshots of the verbose log; only plain text is acceptable.**
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
- How it could be fixed
- How your proposed solution would look like
-If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a commiter myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
+If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
-**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
### Are you using the latest version?
### Is the issue already documented?
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
### Why are existing options not enough?
youtube-dl is released into the public domain by the copyright holders.
-This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
+This README file was originally written by [Daniel Bolton](https://github.com/dbbolton) and is likewise released into the public domain.
Alternatively, refer to the developer instructions for how to check out
and work with the git repository. For further options, including PGP
-signatures, see https://rg3.github.io/youtube-dl/download.html .
+signatures, see the youtube-dl Download Page.
expected filesize (experimental)
--hls-prefer-native Use the native HLS downloader instead of
ffmpeg (experimental)
+ --hls-use-mpegts Use the mpegts container for HLS videos,
+ allowing to play the video while
+ downloading (some players may not be able
+ to play it)
--external-downloader COMMAND Use the specified external downloader.
Currently supports
aria2c,axel,curl,httpie,wget
--all-formats Download all available video formats
--prefer-free-formats Prefer free video formats unless a specific
one is requested
- -F, --list-formats List all available formats of specified
+ -F, --list-formats List all available formats of requested
videos
--youtube-skip-dash-manifest Do not download the DASH manifests and
related data on YouTube videos
preference, for example: "srt" or
"ass/srt/best"
--sub-lang LANGS Languages of the subtitles to download
- (optional) separated by commas, use IETF
- language tags like 'en,pt'
+ (optional) separated by commas, use --list-
+ subs for available language tags
Authentication Options:
downloading, similar to find's -exec
syntax. Example: --exec 'adb push {}
/sdcard/Music/ && rm {}'
- --convert-subtitles FORMAT Convert the subtitles to other format
+ --convert-subs FORMAT Convert the subtitles to other format
(currently supported: srt|ass|vtt)
is a percent symbol followed by a name in parentheses, followed by a
lowercase S. Allowed names are:
-- id: The sequence will be replaced by the video identifier.
-- url: The sequence will be replaced by the video URL.
-- uploader: The sequence will be replaced by the nickname of the
- person who uploaded the video.
-- upload_date: The sequence will be replaced by the upload date in
- YYYYMMDD format.
-- title: The sequence will be replaced by the video title.
-- ext: The sequence will be replaced by the appropriate extension
- (like flv or mp4).
-- epoch: The sequence will be replaced by the Unix epoch when creating
- the file.
-- autonumber: The sequence will be replaced by a five-digit number
- that will be increased with each download, starting at zero.
-- playlist: The sequence will be replaced by the name or the id of the
- playlist that contains the video.
-- playlist_index: The sequence will be replaced by the index of the
- video in the playlist padded with leading zeros according to the
- total length of the playlist.
-- format_id: The sequence will be replaced by the format code
- specified by --format.
-- duration: The sequence will be replaced by the length of the video
- in seconds.
+- id: Video identifier
+- title: Video title
+- url: Video URL
+- ext: Video filename extension
+- alt_title: A secondary title of the video
+- display_id: An alternative identifier for the video
+- uploader: Full name of the video uploader
+- creator: The main artist who created the video
+- release_date: The date (YYYYMMDD) when the video was released
+- timestamp: UNIX timestamp of the moment the video became available
+- upload_date: Video upload date (YYYYMMDD)
+- uploader_id: Nickname or id of the video uploader
+- location: Physical location where the video was filmed
+- duration: Length of the video in seconds
+- view_count: How many users have watched the video on the platform
+- like_count: Number of positive ratings of the video
+- dislike_count: Number of negative ratings of the video
+- repost_count: Number of reposts of the video
+- average_rating: Average rating given by users; the scale used depends
+  on the webpage
+- comment_count: Number of comments on the video
+- age_limit: Age restriction for the video (years)
+- format: A human-readable description of the format
+- format_id: Format code specified by --format
+- format_note: Additional info about the format
+- width: Width of the video
+- height: Height of the video
+- resolution: Textual description of width and height
+- tbr: Average bitrate of audio and video in KBit/s
+- abr: Average audio bitrate in KBit/s
+- acodec: Name of the audio codec in use
+- asr: Audio sampling rate in Hertz
+- vbr: Average video bitrate in KBit/s
+- fps: Frame rate
+- vcodec: Name of the video codec in use
+- container: Name of the container format
+- filesize: The number of bytes, if known in advance
+- filesize_approx: An estimate for the number of bytes
+- protocol: The protocol that will be used for the actual download
+- extractor: Name of the extractor
+- extractor_key: Key name of the extractor
+- epoch: Unix epoch when creating the file
+- autonumber: Five-digit number that will be increased with each
+ download, starting at zero
+- playlist: Name or id of the playlist that contains the video
+- playlist_index: Index of the video in the playlist padded with
+ leading zeros according to the total length of the playlist
+
+Available for the video that belongs to some logical chapter or section:
+- chapter: Name or title of the chapter the video belongs to
+- chapter_number: Number of the chapter the video belongs to
+- chapter_id: Id of the chapter the video belongs to
+
+Available for the video that is an episode of some series or programme:
+- series: Title of the series or programme the video episode belongs to
+- season: Title of the season the video episode belongs to
+- season_number: Number of the season the video episode belongs to
+- season_id: Id of the season the video episode belongs to
+- episode: Title of the video episode
+- episode_number: Number of the video episode within a season
+- episode_id: Id of the video episode
+
+Each of the aforementioned sequences, when referenced in an output
+template, will be replaced by the actual value corresponding to the
+sequence name. Note that some of the sequences are not guaranteed to be
+present, since they depend on the metadata obtained by a particular
+extractor; such sequences will be replaced with NA.
+
+For example, for -o %(title)s-%(id)s.%(ext)s and an mp4 video with
+title youtube-dl test video and id BaW_jenozKcj, this will result in a
+youtube-dl test video-BaW_jenozKcj.mp4 file created in the current
+directory.
+
+The output template can also contain an arbitrary hierarchical path,
+e.g. -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s', which
+will result in downloading each video into a directory corresponding to
+this path template. Any missing directory will be automatically created
+for you.
+
+To specify a literal percent character in the output template, use %%.
+To output to stdout, use -o -.
The current default template is %(title)s-%(id)s.%(ext)s.
or the filename through an 8bit-unsafe channel. In these cases, add the
--restrict-filenames flag to get a shorter title:
-``` {.bash}
-$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
-youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
-$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
-youtube-dl_test_video_.mp4 # A simple file name
-```
+Examples (note that on Windows you may need to use double quotes
+instead of single quotes):
+
+ $ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc
+ youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
+
+ $ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc --restrict-filenames
+ youtube-dl_test_video_.mp4 # A simple file name
+
+ # Download YouTube playlist videos in separate directory indexed by video order in a playlist
+ $ youtube-dl -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
+
+ # Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home
+ $ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
+
+ # Download entire series season keeping each series and each season in separate directory under C:/MyVideos
+ $ youtube-dl -o "C:/MyVideos/%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" http://videomore.ru/kino_v_detalayah/5_sezon/367617
+
+ # Stream the video being downloaded to stdout
+ $ youtube-dl -o - BaW_jenozKc
FORMAT SELECTION
-By default youtube-dl tries to download the best quality, but sometimes
-you may want to download in a different format. The simplest case is
-requesting a specific format, for example -f 22. You can get the list of
-available formats using --list-formats, you can also use a file
-extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or
-the special names best, bestvideo, bestaudio and worst.
+By default youtube-dl tries to download the best available quality,
+i.e. if you want the best quality you DON'T NEED to pass any special
+options; youtube-dl will guess it for you by DEFAULT.
+
+But sometimes you may want to download in a different format, for
+example when you are on a slow or intermittent connection. The key
+mechanism for achieving this is the so-called _format selection_, with
+which you can explicitly specify the desired format, select formats
+based on one or more criteria, set up precedence and much more.
+
+The general syntax for format selection is --format FORMAT or,
+shorter, -f FORMAT, where FORMAT is a _selector expression_, i.e. an
+expression that describes the format or formats you would like to
+download.
+
+The simplest case is requesting a specific format, for example with
+-f 22 you can download the format with format code equal to 22. You can
+get the list of available format codes for a particular video using
+--list-formats or -F. Note that these format codes are extractor
+specific.
+
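+For instance, with the test video used elsewhere in this README (a
+minimal sketch; format code 22 is assumed to be among the codes
+listed):
+
+    # list all available format codes for the video
+    $ youtube-dl -F 'http://www.youtube.com/watch?v=BaW_jenozKc'
+    # then download one of them explicitly
+    $ youtube-dl -f 22 'http://www.youtube.com/watch?v=BaW_jenozKc'
+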
+You can also use a file extension (currently 3gp, aac, flv, m4a, mp3,
+mp4, ogg, wav, webm are supported) to download the best quality format
+with that file extension served as a single file, e.g. -f webm will
+download the best quality format with the webm extension served as a
+single file.
+
+You can also use special names to select particular edge-case formats:
+- best: Select best quality format represented by single file with
+  video and audio
+- worst: Select worst quality format represented by single file with
+  video and audio
+- bestvideo: Select best quality video only format (e.g. DASH video),
+  may not be available
+- worstvideo: Select worst quality video only format, may not be
+  available
+- bestaudio: Select best quality audio only format, may not be
+  available
+- worstaudio: Select worst quality audio only format, may not be
+  available
+
+For example, to download the worst quality video-only format you can
+use -f worstvideo.
If you want to download multiple videos and they don't have the same
formats available, you can specify the order of preference using
-slashes, as in -f 22/17/18. You can also filter the video results by
-putting a condition in brackets, as in -f "best[height=720]" (or
--f "[filesize>10M]"). This works for filesize, height, width, tbr, abr,
-vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext,
-acodec, vcodec, container, and protocol and the comparisons =, != .
+slashes. Note that slash is left-associative, i.e. formats on the left
+hand side are preferred, for example -f 22/17/18 will download format 22
+if it's available, otherwise it will download format 17 if it's
+available, otherwise it will download format 18 if it's available,
+otherwise it will complain that no suitable formats are available for
+download.
+
+If you want to download several formats of the same video use a comma
+as a separator, e.g. -f 22,17,18 will download all three of these
+formats if they are available. A more sophisticated example combines
+this with the precedence feature:
+-f 136/137/mp4/bestvideo,140/m4a/bestaudio.
+
+You can also filter the video formats by putting a condition in
+brackets, as in -f "best[height=720]" (or -f "[filesize>10M]").
+
+The following numeric meta fields can be used with comparisons <, <=,
+>, >=, = (equals), != (not equals):
+- filesize: The number of bytes, if known in advance
+- width: Width of the video, if known
+- height: Height of the video, if known
+- tbr: Average bitrate of audio and video in KBit/s
+- abr: Average audio bitrate in KBit/s
+- vbr: Average video bitrate in KBit/s
+- asr: Audio sampling rate in Hertz
+- fps: Frame rate
+
+Filtering also works for the comparisons = (equals), != (not equals),
+^= (begins with), $= (ends with), *= (contains) and the following
+string meta fields:
+- ext: File extension
+- acodec: Name of the audio codec in use
+- vcodec: Name of the video codec in use
+- container: Name of the container format
+- protocol: The protocol that will be used for the actual download,
+  lower-case. http, https, rtsp, rtmp, rtmpe, m3u8, or m3u8_native
+
+Note that none of the aforementioned meta fields are guaranteed to be
+present, since this solely depends on the metadata obtained by a
+particular extractor, i.e. the metadata offered by the video hoster.
+
Formats for which the value is not known are excluded unless you put a
question mark (?) after the operator. You can combine format filters, so
-f "[height <=? 720][tbr>500]" selects up to 720p videos (or videos
where the height is not known) with a bitrate of at least 500 KBit/s.
-Use commas to download multiple formats, such as
--f 136/137/mp4/bestvideo,140/m4a/bestaudio. You can merge the video and
-audio of two formats into a single file using
--f <video-format>+<audio-format> (requires ffmpeg or avconv), for
-example -f bestvideo+bestaudio. Format selectors can also be grouped
-using parentheses, for example if you want to download the best mp4 and
-webm formats with a height lower than 480 you can use
--f '(mp4,webm)[height<480]'.
+
+You can merge the video and audio of two formats into a single file
+using -f <video-format>+<audio-format> (requires ffmpeg or avconv
+installed), for example -f bestvideo+bestaudio will download the best
+video-only format and the best audio-only format, and mux them together
+with ffmpeg/avconv.
+
+Format selectors can also be grouped using parentheses, for example if
+you want to download the best mp4 and webm formats with a height lower
+than 480 you can use -f '(mp4,webm)[height<480]'.
Since the end of April 2015 and version 2015.04.26 youtube-dl uses
-f bestvideo+bestaudio/best as default format selection (see #5447,
falls back to best and results in downloading the best available quality
served as a single file. best is also needed for videos that don't come
from YouTube because they don't provide the audio and video in two
-different files. If you want to only download some dash formats (for
+different files. If you want to only download some DASH formats (for
example if you are not interested in getting videos with a resolution
higher than 1080p), you can add
-f bestvideo[height<=?1080]+bestaudio/best to your configuration file.
your choice with -f best. You may want to add it to the configuration
file in order not to type it every time you run youtube-dl.
+Examples (note that on Windows you may need to use double quotes
+instead of single quotes):
+
+ # Download best mp4 format available or any other best if no mp4 available
+ $ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
+
+    # Download best format available but no better than 480p
+ $ youtube-dl -f 'bestvideo[height<=480]+bestaudio/best[height<=480]'
+
+    # Download best video only format but no bigger than 50 MB
+ $ youtube-dl -f 'best[filesize<50M]'
+
+ # Download best format available via direct link over HTTP/HTTPS protocol
+ $ youtube-dl -f '(bestvideo+bestaudio/best)[protocol^=http]'
+
VIDEO SELECTION
Examples:
-``` {.bash}
-# Download only the videos uploaded in the last 6 months
-$ youtube-dl --dateafter now-6months
+ # Download only the videos uploaded in the last 6 months
+ $ youtube-dl --dateafter now-6months
-# Download only the videos uploaded on January 1, 1970
-$ youtube-dl --date 19700101
+ # Download only the videos uploaded on January 1, 1970
+ $ youtube-dl --date 19700101
-$ # Download only the videos uploaded in the 200x decade
-$ youtube-dl --dateafter 20000101 --datebefore 20091231
-```
+ $ # Download only the videos uploaded in the 200x decade
+ $ youtube-dl --dateafter 20000101 --datebefore 20091231
-o "/home/user/videos/%(title)s-%(id)s.%(ext)s". If you want this for
all of your downloads, put the option into your configuration file.
-How do I download a video starting with a - ?
+How do I download a video starting with a -?
Either prepend http://www.youtube.com/watch?v= or separate the ID from
the options with --:
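    youtube-dl -- -wNyEUrxzFU
    youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"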
when using --cookies is a good sign of invalid newline format.
Passing cookies to youtube-dl is a good way to work around login when a
-particular extractor does not implement it explicitly.
+particular extractor does not implement it explicitly. Another use case
+is working around CAPTCHA some websites require you to solve in
+particular cases in order to get access (e.g. YouTube, CloudFlare).
Can you add support for this anime video site, or site which shows current movies for free?
4. Start with this simple template and save it to
youtube_dl/extractor/yourextractor.py:
- ``` {.python}
- # coding: utf-8
- from __future__ import unicode_literals
-
- from .common import InfoExtractor
-
-
- class YourExtractorIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
- _TEST = {
- 'url': 'http://yourextractor.com/watch/42',
- 'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
- 'info_dict': {
- 'id': '42',
- 'ext': 'mp4',
- 'title': 'Video title goes here',
- 'thumbnail': 're:^https?://.*\.jpg$',
- # TODO more properties, either as:
- # * A value
- # * MD5 checksum; start the string with md5:
- # * A regular expression; start the string with re:
- # * Any Python type (for example int or float)
+ # coding: utf-8
+ from __future__ import unicode_literals
+
+ from .common import InfoExtractor
+
+
+ class YourExtractorIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://yourextractor.com/watch/42',
+ 'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
+ 'info_dict': {
+ 'id': '42',
+ 'ext': 'mp4',
+ 'title': 'Video title goes here',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ # TODO more properties, either as:
+ # * A value
+ # * MD5 checksum; start the string with md5:
+ # * A regular expression; start the string with re:
+ # * Any Python type (for example int or float)
+ }
}
- }
- def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
- # TODO more code goes here, for example ...
- title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
+ # TODO more code goes here, for example ...
+ title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
- return {
- 'id': video_id,
- 'title': title,
- 'description': self._og_search_description(webpage),
- 'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
- # TODO more properties (see youtube_dl/extractor/common.py)
- }
- ```
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': self._og_search_description(webpage),
+ 'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
+ # TODO more properties (see youtube_dl/extractor/common.py)
+ }
5. Add an import in youtube_dl/extractor/__init__.py.
6. Run python test/test_download.py TestDownload.test_YourExtractor.
From a Python program, you can embed youtube-dl in a more powerful
fashion, like this:
-``` {.python}
-from __future__ import unicode_literals
-import youtube_dl
+ from __future__ import unicode_literals
+ import youtube_dl
-ydl_opts = {}
-with youtube_dl.YoutubeDL(ydl_opts) as ydl:
- ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
-```
+ ydl_opts = {}
+ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+ ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
Most likely, you'll want to use various options. For a list of what can
be done, have a look at youtube_dl/YoutubeDL.py. For a start, if you
(and a short message after the download is finished), and
downloads/converts the video to an mp3 file:
-``` {.python}
-from __future__ import unicode_literals
-import youtube_dl
+ from __future__ import unicode_literals
+ import youtube_dl
-class MyLogger(object):
- def debug(self, msg):
- pass
+ class MyLogger(object):
+ def debug(self, msg):
+ pass
- def warning(self, msg):
- pass
+ def warning(self, msg):
+ pass
- def error(self, msg):
- print(msg)
+ def error(self, msg):
+ print(msg)
-def my_hook(d):
- if d['status'] == 'finished':
- print('Done downloading, now converting ...')
+ def my_hook(d):
+ if d['status'] == 'finished':
+ print('Done downloading, now converting ...')
-ydl_opts = {
- 'format': 'bestaudio/best',
- 'postprocessors': [{
- 'key': 'FFmpegExtractAudio',
- 'preferredcodec': 'mp3',
- 'preferredquality': '192',
- }],
- 'logger': MyLogger(),
- 'progress_hooks': [my_hook],
-}
-with youtube_dl.YoutubeDL(ydl_opts) as ydl:
- ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
-```
+ ydl_opts = {
+ 'format': 'bestaudio/best',
+ 'postprocessors': [{
+ 'key': 'FFmpegExtractAudio',
+ 'preferredcodec': 'mp3',
+ 'preferredquality': '192',
+ }],
+ 'logger': MyLogger(),
+ 'progress_hooks': [my_hook],
+ }
+ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+ ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
Bugs and suggestions should be reported at:
-https://github.com/rg3/youtube-dl/issues . Unless you were prompted so
-or there is another pertinent reason (e.g. GitHub fails to accept the
-bug report), please do not send bug reports via personal email. For
-discussions, join us in the irc channel #youtube-dl on freenode.
-
-PLEASE INCLUDE THE FULL OUTPUT OF YOUTUBE-DL WHEN RUN WITH -v.
+https://github.com/rg3/youtube-dl/issues. Unless you were prompted to
+do so or there is another pertinent reason (e.g. GitHub fails to accept
+the bug report), please do not send bug reports via personal email. For
+discussions, join us in the IRC channel #youtube-dl on freenode
+(webchat).
+
+PLEASE INCLUDE THE FULL OUTPUT OF YOUTUBE-DL WHEN RUN WITH -v, i.e. ADD
+the -v flag to YOUR COMMAND LINE, copy the WHOLE output and post it in
+the issue body wrapped in ``` for better formatting. It should look
+similar to this:
+
+ $ youtube-dl -v <your command line>
+ [debug] System config: []
+ [debug] User config: []
+ [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+ [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
+ [debug] youtube-dl version 2015.12.06
+ [debug] Git HEAD: 135392e
+ [debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
+ [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
+ [debug] Proxy map: {}
+ ...
+
+DO NOT POST SCREENSHOTS OF THE VERBOSE LOG; ONLY PLAIN TEXT IS
+ACCEPTABLE.
The output (including the first lines) contains important debugging
information. Issues without the full output are often not reproducible
If your report is shorter than two lines, it is almost certainly missing
some of these, which makes it hard for us to respond to it. We're often
too polite to close the issue outright, but the missing info makes
-misinterpretation likely. As a commiter myself, I often get frustrated
+misinterpretation likely. As a committer myself, I often get frustrated
by these issues, since the only possible way for me to move forward on
them is to ask for clarification over and over.
SITE SUPPORT REQUESTS MUST CONTAIN AN EXAMPLE URL. An example URL is a
URL you might want to download, like
-http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious
+http://www.youtube.com/watch?v=BaW_jenozKc. There should be an obvious
video present. Except under very special circumstances, the main page of
-a video service (e.g. http://www.youtube.com/ ) is _not_ an example URL.
+a video service (e.g. http://www.youtube.com/) is _not_ an example URL.
Are you using the latest version?
Is the issue already documented?
Make sure that someone has not already opened the issue you're trying to
-open. Search at the top of the window or at
-https://github.com/rg3/youtube-dl/search?type=Issues . If there is an
-issue, feel free to write something along the lines of "This affects me
-as well, with version 2015.01.01. Here is some more information on the
-issue: ...". While some issues may be old, a new post into them often
-spurs rapid activity.
+open. Search at the top of the window or browse the GitHub Issues of
+this repository. If there is an issue, feel free to write something
+along the lines of "This affects me as well, with version 2015.01.01.
+Here is some more information on the issue: ...". While some issues may
+be old, a new post into them often spurs rapid activity.
Why are existing options not enough?
youtube-dl is released into the public domain by the copyright holders.
-This README file was originally written by Daniel Bolton
-(https://github.com/dbbolton) and is likewise released into the public
-domain.
+This README file was originally written by Daniel Bolton and is likewise
+released into the public domain.
import datetime
import glob
-import io # For Python 2 compatibilty
+import io # For Python 2 compatibility
import os
import re
# Supported sites
- **1tv**: Первый канал
- **1up.com**
+ - **20min**
- **220.ro**
- **22tracks:genre**
- **22tracks:track**
- **abc.net.au**
- **Abc7News**
- **AcademicEarth:Course**
+ - **acast**
+ - **acast:channel**
- **AddAnime**
- **AdobeTV**
+ - **AdobeTVChannel**
+ - **AdobeTVShow**
- **AdobeTVVideo**
- **AdultSwim**
- - **Aftenposten**
+ - **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network
- **Aftonbladet**
- **AirMozilla**
- **AlJazeera**
- **Allocine**
- **AlphaPorno**
+ - **AnimeOnDemand**
- **anitube.se**
- **AnySex**
- **Aparat**
- **AppleConnect**
- **AppleDaily**: 臺灣蘋果日報
- - **AppleTrailers**
+ - **appletrailers**
+ - **appletrailers:section**
- **archive.org**: archive.org videos
- **ARD**
+ - **ARD:mediathek**: Saarländischer Rundfunk
- **ARD:mediathek**
- **arte.tv**
- **arte.tv:+7**
+ - **arte.tv:cinema**
- **arte.tv:concert**
- **arte.tv:creative**
- **arte.tv:ddc**
- **arte.tv:embed**
- **arte.tv:future**
+ - **arte.tv:magazine**
- **AtresPlayer**
- **ATTTechChannel**
+ - **AudiMedia**
- **audiomack**
- **audiomack:album**
- **Azubu**
+ - **AzubuLive**
- **BaiduVideo**: 百度视频
- **bambuser**
- **bambuser:channel**
- **Beeg**
- **BehindKink**
- **Bet**
+ - **Bigflix**
- **Bild**: Bild.de
- **BiliBili**
+ - **BleacherReport**
+ - **BleacherReportCMS**
- **blinkx**
- - **blip.tv:user**
- - **BlipTV**
- **Bloomberg**
- **Bpb**: Bundeszentrale für politische Bildung
- **BR**: Bayerischer Rundfunk Mediathek
- **BYUtv**
- **Camdemy**
- **CamdemyFolder**
- - **Canal13cl**
- **canalc2.tv**
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
+ - **Canvas**
+ - **CBC**
+ - **CBCPlayer**
- **CBS**
- **CBSNews**: CBS News
+ - **CBSNewsLiveVideo**: CBS News Live Videos
- **CBSSports**
- **CeskaTelevize**
- **channel9**: Channel 9
- **Clipfish**
- **cliphunter**
- **Clipsyndicate**
+ - **cloudtime**: CloudTime
- **Cloudy**
- **Clubic**
- **Clyp**
- **ComedyCentralShows**: The Daily Show / The Colbert Report
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
- **Cracked**
+ - **Crackle**
- **Criterion**
- **CrooksAndLiars**
- **Crunchyroll**
- **CSpan**: C-SPAN
- **CtsNews**: 華視新聞
- **culturebox.francetvinfo.fr**
+ - **CultureUnplugged**
+ - **CWTV**
- **dailymotion**
- **dailymotion:playlist**
- **dailymotion:user**
- **DailymotionCloud**
- **daum.net**
+ - **daum.net:clip**
+ - **daum.net:playlist**
+ - **daum.net:user**
- **DBTV**
- **DCN**
+ - **dcn:live**
+ - **dcn:season**
+ - **dcn:video**
- **DctpTv**
- **DeezerPlaylist**
- **defense.gouv.fr**
- **democracynow**
- **DHM**: Filmarchiv - Deutsches Historisches Museum
+ - **Digiteka**
- **Discovery**
- **Dotsub**
- **DouyuTV**: 斗鱼
- **Eporner**
- **EroProfile**
- **Escapist**
- - **ESPN** (Currently broken)
+ - **ESPN**
- **EsriVideo**
- **Europa**
- **EveryonesMixtape**
- **ExpoTV**
- **ExtremeTube**
- **facebook**
+ - **facebook:post**
- **faz.net**
- **fc2**
- **Fczenit**
- **Flickr**
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
- **FootyRoom**
+ - **FOX**
- **Foxgay**
- **FoxNews**: Fox News and Fox Business Video
- **FoxSports**
- **france2.fr:generation-quoi**
- **FranceCulture**
+ - **FranceCultureEmission**
- **FranceInter**
- **francetv**: France 2, 3, 4, 5 and Ô
- **francetvinfo.fr**
- **Freesound**
- **freespeech.org**
- **FreeVideo**
+ - **Funimation**
- **FunnyOrDie**
+ - **GameInformer**
- **Gamekings**
- **GameOne**
- **gameone:playlist**
- **GodTube**
- **GoldenMoustache**
- **Golem**
+ - **GoogleDrive**
- **Goshgay**
+ - **GPUTechConf**
- **Groupon**
- **Hark**
- **HearThisAt**
- **Helsinki**: helsinki.fi
- **HentaiStigma**
- **HistoricFilms**
- - **History**
- **hitbox**
- **hitbox:live**
- **HornBunny**
- **HotNewHipHop**
+ - **HotStar**
- **Howcast**
- **HowStuffWorks**
- **HuffPost**: Huffington Post
- **Ir90Tv**
- **ivi**: ivi.ru
- **ivi:compilation**: ivi.ru compilations
+ - **ivideon**: Ivideon TV
- **Izlesene**
- **JadoreCettePub**
- **JeuxVideo**
- **Jove**
- **jpopsuki.tv**
- - **Jukebox**
+ - **JWPlatform**
- **Kaltura**
- **KanalPlay**: Kanal 5/9/11 Play
- **Kankan**
- **KeezMovies**
- **KhanAcademy**
- **KickStarter**
+ - **KonserthusetPlay**
- **kontrtube**: KontrTube.ru - Труба зовёт
- **KrasView**: Красвью
- **Ku6**
- **la7.tv**
- **Laola1Tv**
- **Lecture2Go**
+ - **Lemonde**
- **Letv**: 乐视网
+ - **LetvCloud**: 乐视云
- **LetvPlaylist**
- **LetvTv**
- **Libsyn**
- **livestream**
- **livestream:original**
- **LnkGo**
+ - **LoveHomePorn**
- **lrt.lt**
- **lynda**: lynda.com videos
- **lynda:course**: lynda.com online courses
- **m6**
- **macgamestore**: MacGameStore trailers
- **mailru**: Видео@Mail.Ru
+ - **MakerTV**
- **Malemotion**
+ - **MatchTV**
- **MDR**: MDR.DE and KiKA
- **media.ccc.de**
- **metacafe**
- **MovieClips**
- **MovieFap**
- **Moviezine**
- - **movshare**: MovShare
- **MPORA**
- **MSNBC**
- **MTV**
- **MySpace:album**
- **MySpass**
- **Myvi**
- - **myvideo**
+ - **myvideo** (Currently broken)
- **MyVidster**
- **n-tv.de**
- **NationalGeographic**
- **Newstube**
- **NextMedia**: 蘋果日報
- **NextMediaActionNews**: 蘋果日報 - 動新聞
+ - **nextmovie.com**
- **nfb**: National Film Board of Canada
- **nfl.com**
- **nhl.com**
- **nhl.com:news**: NHL news
- **nhl.com:videocenter**: NHL videocenter category
+ - **nick.com**
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
- **njoy**: N-JOY
- **nowness**
- **nowness:playlist**
- **nowness:series**
- - **NowTV**
+ - **NowTV** (Currently broken)
- **NowTVList**
- **nowvideo**: NowVideo
+ - **Noz**
- **npo**: npo.nl and ntr.nl
- **npo.nl:live**
- **npo.nl:radio**
- **npo.nl:radio:fragment**
+ - **Npr**
- **NRK**
- **NRKPlaylist**
- **NRKTV**: NRK TV and NRK Radio
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
+ - **OraTV**
- **orf:fm4**: radio FM4
- **orf:iptv**: iptv.ORF.at
- **orf:oe1**: Radio Österreich 1
- **orf:tvthek**: ORF TVthek
+ - **pandora.tv**: 판도라TV
- **parliamentlive.tv**: UK parliament videos
- **Patreon**
- - **PBS**
+ - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
+ - **pcmag**
- **Periscope**: Periscope
- **PhilharmonieDeParis**: Philharmonie de Paris
- - **Phoenix**
+ - **phoenix.de**
- **Photobucket**
- **Pinkbike**
- **Pladform**
- **PlanetaPlay**
- **play.fm**
- **played.to**
+ - **PlaysTV**
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
- **Playvid**
- **Playwire**
- **PornHd**
- **PornHub**
- **PornHubPlaylist**
+ - **PornHubUserVideos**
- **Pornotube**
- **PornoVoisines**
- **PornoXO**
- **radiofrance**
- **RadioJavan**
- **Rai**
+ - **RaiTV**
- **RBMARadio**
- **RDS**: RDS.ca
- **RedTube**
+ - **RegioTV**
- **Restudy**
- **ReverbNation**
+ - **Revision3**
- **RingTV**
- **RottenTomatoes**
- **Roxwel**
- **RTBF**
- - **Rte**
+ - **rte**: Raidió Teilifís Éireann TV
+ - **rte:radio**: Raidió Teilifís Éireann radio
- **rtl.nl**: rtl.nl and rtlxl.nl
- **RTL2**
- **RTP**
- **rtve.es:live**: RTVE.es live streams
- **RTVNH**
- **RUHD**
+ - **RulePorn**
- **rutube**: Rutube videos
- **rutube:channel**: Rutube channels
- **rutube:embed**: Rutube embedded videos
- **Sapo**: SAPO Vídeos
- **savefrom.net**
- **SBS**: sbs.com.au
+ - **schooltv**
- **SciVee**
- **screen.yahoo:search**: Yahoo screen search
- **Screencast**
- **ScreencastOMatic**
+ - **ScreenJunkies**
- **ScreenwaveMedia**
- **SenateISVP**
- **ServingSys**
- **Shared**: shared.sx and vivo.sx
- **ShareSix**
- **Sina**
+ - **skynewsarabia:video**
- **Slideshare**
- **Slutload**
- **smotri**: Smotri.com
- **SnagFilmsEmbed**
- **Snotr**
- **Sohu**
- - **soompi**
- - **soompi:show**
- **soundcloud**
- **soundcloud:playlist**
- **soundcloud:search**: Soundcloud search
- **SportBoxEmbed**
- **SportDeutschland**
- **Sportschau**
- - **Srf**
- - **SRMediathek**: Saarländischer Rundfunk
+ - **SRGSSR**
+ - **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
- **SSA**
- **stanfordoc**: Stanford Open ClassRoom
- **Steam**
- **TechTalks**
- **techtv.mit.edu**
- **ted**
+ - **Tele13**
- **TeleBruxelles**
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
- **Telegraaf**
- **TeleMB**
- **TeleTask**
- **TenPlay**
- - **TestTube**
- **TF1**
+ - **TheIntercept**
- **TheOnion**
- **ThePlatform**
- **ThePlatformFeed**
- **THVideo**
- **THVideoPlaylist**
- **tinypic**: tinypic.com videos
- - **tlc.com**
- **tlc.de**
- **TMZ**
- **TMZArticle**
- **TNAFlix**
+ - **toggle**
- **tou.tv**
- **Toypics**: Toypics user profile
- **ToypicsUser**: Toypics user profile
- **TrailerAddict** (Currently broken)
- **Trilulilu**
+ - **trollvids**
- **TruTube**
- **Tube8**
- **TubiTv**
- - **Tudou**
+ - **tudou**
+ - **tudou:album**
+ - **tudou:playlist**
- **Tumblr**
- - **TuneIn**
+ - **tunein:clip**
+ - **tunein:program**
+ - **tunein:station**
+ - **tunein:topic**
- **Turbo**
- **Tutv**
- **tv.dfb.de**
- **TVC**
- **TVCArticle**
- **tvigle**: Интернет-телевидение Tvigle.ru
+ - **tvland.com**
- **tvp.pl**
- **tvp.pl:Series**
- **TVPlay**: TV3Play and related services
- **twitch:video**
- **twitch:vod**
- **twitter**
+ - **twitter:amplify**
- **twitter:card**
- **Ubu**
- **udemy**
- **udemy:course**
- **UDNEmbed**: 聯合影音
- - **Ultimedia**
- **Unistra**
- **Urort**: NRK P3 Urørt
- **ustream**
- **Vessel**
- **Vesti**: Вести.Ru
- **Vevo**
- - **VGTV**: VGTV and BTTV
+ - **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
- **vh1.com**
- **Vice**
- **Viddler**
- **video.mit.edu**
- **VideoDetective**
- **videofy.me**
- - **VideoMega**
+ - **VideoMega** (Currently broken)
+ - **videomore**
+ - **videomore:season**
+ - **videomore:video**
- **VideoPremium**
- - **VideoTt**: video.tt - Your True Tube
+ - **VideoTt**: video.tt - Your True Tube (Currently broken)
- **videoweed**: VideoWeed
- - **Vidme**
+ - **vidme**
+ - **vidme:user**
+ - **vidme:user:likes**
- **Vidzi**
- **vier**
- **vier:videos**
- **WebOfStories**
- **WebOfStoriesPlaylist**
- **Weibo**
+ - **WeiqiTV**: WQTV
+ - **wholecloud**: WholeCloud
- **Wimp**
- **Wistia**
- **WNL**
- **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
- **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
- **youtube:playlist**: YouTube.com playlists
+ - **youtube:playlists**: YouTube.com user/channel playlists
- **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
- **youtube:search**: YouTube.com searches
- **youtube:search:date**: YouTube.com searches, newest videos first
- **youtube:show**: YouTube.com (multi-season) shows
- **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
- **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
- - **youtube:user:playlists**: YouTube.com user playlists
- **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
- **Zapiks**
- **ZDF**
- **ZDFChannel**
- **zingmp3:album**: mp3.zing.vn albums
- **zingmp3:song**: mp3.zing.vn songs
+ - **ZippCast**
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
-from youtube_dl.compat import compat_str
+from youtube_dl.compat import compat_str, compat_urllib_error
from youtube_dl.extractor import YoutubeIE
+from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import ExtractorError, match_filter_func
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
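+        # A format whose vcodec contains a dot (e.g. 'avc1.123456') must
+        # still be selectable with an exact vcodec filter; the dot must
+        # not be misread as filter syntax.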
+ formats = [
+ {'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL},
+ ]
+ info_dict = _make_result(formats)
+
+ ydl = YDL({'format': 'bestvideo[vcodec=avc1.123456]'})
+ ydl.process_ie_result(info_dict.copy())
+ downloaded = ydl.downloaded_info_dicts[0]
+ self.assertEqual(downloaded['format_id'], 'vid-vcodec-dot')
+
def test_youtube_format_selection(self):
order = [
- '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
+ '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
def format_info(f_id):
info = YoutubeIE._formats[f_id].copy()
+
+ # XXX: In real cases InfoExtractor._parse_mpd_formats() fills up 'acodec'
+ # and 'vcodec', while in tests such information is incomplete since
+ # commit a6c2c24479e5f4827ceb06f64d855329c0a6f593
+ # test_YoutubeDL.test_youtube_format_selection is broken without
+ # this fix
+ if 'acodec' in info and 'vcodec' not in info:
+ info['vcodec'] = 'none'
+ elif 'vcodec' in info and 'acodec' not in info:
+ info['acodec'] = 'none'
+
info['format_id'] = f_id
info['url'] = 'url:' + f_id
return info
result = get_ids({'playlist_items': '10'})
self.assertEqual(result, [])
+ def test_urlopen_no_file_protocol(self):
+ # see https://github.com/rg3/youtube-dl/issues/8227
+ ydl = YDL()
+ self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
+
+ def test_do_not_override_ie_key_in_url_transparent(self):
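+        # Foo1 yields a url_transparent result pointing at Foo2; Foo2
+        # yields a plain url result with ie_key 'Foo3'. When the results
+        # are merged, the inner ie_key must win, so extraction has to
+        # continue in Foo3IE and end up at TEST_URL.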
+ ydl = YDL()
+
+ class Foo1IE(InfoExtractor):
+ _VALID_URL = r'foo1:'
+
+ def _real_extract(self, url):
+ return {
+ '_type': 'url_transparent',
+ 'url': 'foo2:',
+ 'ie_key': 'Foo2',
+ }
+
+ class Foo2IE(InfoExtractor):
+ _VALID_URL = r'foo2:'
+
+ def _real_extract(self, url):
+ return {
+ '_type': 'url',
+ 'url': 'foo3:',
+ 'ie_key': 'Foo3',
+ }
+
+ class Foo3IE(InfoExtractor):
+ _VALID_URL = r'foo3:'
+
+ def _real_extract(self, url):
+ return _make_result([{'url': TEST_URL}])
+
+ ydl.add_info_extractor(Foo1IE(ydl))
+ ydl.add_info_extractor(Foo2IE(ydl))
+ ydl.add_info_extractor(Foo3IE(ydl))
+ ydl.extract_info('foo1:')
+ downloaded = ydl.downloaded_info_dicts[0]
+ self.assertEqual(downloaded['url'], TEST_URL)
+
if __name__ == '__main__':
unittest.main()
assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
def test_youtube_user_matching(self):
- self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
+ self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
def test_youtube_feeds(self):
self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
def test_pbs(self):
# https://github.com/rg3/youtube-dl/issues/2350
- self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
- self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
+ self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs'])
+ self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs'])
def test_yahoo_https(self):
# https://github.com/rg3/youtube-dl/issues/2701
--- /dev/null
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL
+from youtube_dl.extractor import IqiyiIE
+
+
+class IqiyiIEWithCredentials(IqiyiIE):
+ def _get_login_info(self):
+ return 'foo', 'bar'
+
+
+class WarningLogger(object):
+ def __init__(self):
+ self.messages = []
+
+ def warning(self, msg):
+ self.messages.append(msg)
+
+ def debug(self, msg):
+ pass
+
+ def error(self, msg):
+ pass
+
+
+class TestIqiyiSDKInterpreter(unittest.TestCase):
+ def test_iqiyi_sdk_interpreter(self):
+ '''
+ Test the functionality of IqiyiSDKInterpreter by trying to log in
+
+        If `sign` is incorrect, the /validate call fails with an HTTP 556 error
+ '''
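+        # Logging in with dummy credentials exercises the SDK interpreter
+        # end to end; the extractor reports the failure through the logger
+        # instead of raising, which is what the assertion below checks.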
+ logger = WarningLogger()
+ ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
+ ie._login()
+ self.assertTrue('unable to log in:' in logger.messages[0])
+
+if __name__ == '__main__':
+ unittest.main()
from youtube_dl.extractor import (
- BlipTVIE,
YoutubeIE,
DailymotionIE,
TEDIE,
NPOIE,
ComedyCentralIE,
NRKTVIE,
- RaiIE,
+ RaiTVIE,
VikiIE,
ThePlatformIE,
ThePlatformFeedIE,
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(len(subtitles.keys()), 13)
- self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
- self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
- for lang in ['it', 'fr', 'de']:
+ self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
+ self.assertEqual(md5(subtitles['it']), '6d752b98c31f1cf8d597050c7a2cb4b5')
+ for lang in ['fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
- def test_youtube_subtitles_sbv_format(self):
+ def test_youtube_subtitles_ttml_format(self):
self.DL.params['writesubtitles'] = True
- self.DL.params['subtitlesformat'] = 'sbv'
+ self.DL.params['subtitlesformat'] = 'ttml'
subtitles = self.getSubtitles()
- self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')
+ self.assertEqual(md5(subtitles['en']), 'e306f8c42842f723447d9f63ad65df54')
def test_youtube_subtitles_vtt_format(self):
self.DL.params['writesubtitles'] = True
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
-class TestBlipTVSubtitles(BaseTestSubtitles):
- url = 'http://blip.tv/a/a-6603250'
- IE = BlipTVIE
-
- def test_allsubtitles(self):
- self.DL.params['writesubtitles'] = True
- self.DL.params['allsubtitles'] = True
- subtitles = self.getSubtitles()
- self.assertEqual(set(subtitles.keys()), set(['en']))
- self.assertEqual(md5(subtitles['en']), '5b75c300af65fe4476dff79478bb93e4')
-
-
class TestVimeoSubtitles(BaseTestSubtitles):
url = 'http://vimeo.com/76979871'
IE = VimeoIE
class TestRaiSubtitles(BaseTestSubtitles):
url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
- IE = RaiIE
+ IE = RaiTVIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
--- /dev/null
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+import json
+from youtube_dl.update import rsa_verify
+
+
+class TestUpdate(unittest.TestCase):
+ def test_rsa_verify(self):
+ UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
+ with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json'), 'rb') as f:
+ versions_info = f.read().decode()
+ versions_info = json.loads(versions_info)
+ signature = versions_info['signature']
+ del versions_info['signature']
+ self.assertTrue(rsa_verify(
+ json.dumps(versions_info, sort_keys=True).encode('utf-8'),
+ signature, UPDATES_RSA_KEY))
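+
+    # In essence rsa_verify does textbook RSA verification with SHA-256
+    # (a simplified sketch, not the real helper, which also checks the
+    # PKCS#1 v1.5 padding bytes):
+    #
+    #   payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')
+    #   n, e = UPDATES_RSA_KEY
+    #   recovered = pow(int(signature, 16), e, n)
+    #   digest = int(hashlib.sha256(payload).hexdigest(), 16)
+    #   assert recovered & ((1 << 256) - 1) == digest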
+
+
+if __name__ == '__main__':
+ unittest.main()
DateRange,
detect_exe_version,
determine_ext,
+ dict_get,
+ encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
is_html,
js_to_json,
limit_length,
+ ohdave_rsa_encrypt,
OnDemandPagedList,
orderedSet,
parse_duration,
sanitize_path,
prepend_extension,
replace_extension,
+ remove_quotes,
shell_quote,
smuggle_url,
str_to_int,
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
+ def test_remove_quotes(self):
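+        # remove_quotes strips exactly one matching pair of surrounding
+        # quotes and leaves lone or unbalanced quotes untouched.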
+ self.assertEqual(remove_quotes(None), None)
+ self.assertEqual(remove_quotes('"'), '"')
+ self.assertEqual(remove_quotes("'"), "'")
+ self.assertEqual(remove_quotes(';'), ';')
+ self.assertEqual(remove_quotes('";'), '";')
+ self.assertEqual(remove_quotes('""'), '')
+ self.assertEqual(remove_quotes('";"'), ';')
+
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
+ def test_dict_get(self):
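+        # dict_get returns the first truthy value found among the given
+        # keys; with skip_false_values=False it instead returns the first
+        # value that is present at all, even if falsy.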
+ FALSE_VALUES = {
+ 'none': None,
+ 'false': False,
+ 'zero': 0,
+ 'empty_string': '',
+ 'empty_list': [],
+ }
+ d = FALSE_VALUES.copy()
+ d['a'] = 42
+ self.assertEqual(dict_get(d, 'a'), 42)
+ self.assertEqual(dict_get(d, 'b'), None)
+ self.assertEqual(dict_get(d, 'b', 42), 42)
+ self.assertEqual(dict_get(d, ('a', )), 42)
+ self.assertEqual(dict_get(d, ('b', 'a', )), 42)
+ self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
+ self.assertEqual(dict_get(d, ('b', 'c', )), None)
+ self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
+ for key, false_value in FALSE_VALUES.items():
+ self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
+ self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
+
+ def test_encode_compat_str(self):
+ self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
+ self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
+
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
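+        # Callback names may be dotted (e.g. 'ps.embedHandler');
+        # strip_jsonp must still unwrap the JSON payload.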
+ stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
+ d = json.loads(stripped)
+ self.assertEqual(d, {'status': 'success'})
+
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
{'like_count': 190, 'dislike_count': 10}))
def test_parse_dfxp_time_expr(self):
- self.assertEqual(parse_dfxp_time_expr(None), 0.0)
- self.assertEqual(parse_dfxp_time_expr(''), 0.0)
+ self.assertEqual(parse_dfxp_time_expr(None), None)
+ self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
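+        # Some DFXP files separate the fractional part with ':' instead
+        # of '.', so '00:00:01:100' must parse like '00:00:01.100'.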
+ self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
+ <p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
+      <p begin="-1" end="-1">Ignored, two</p>
+ <p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
+ def test_ohdave_rsa_encrypt(self):
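+        # ohdave_rsa_encrypt is essentially textbook RSA: the plaintext
+        # bytes become an integer m, and the result below is the hex
+        # string of pow(m, e, N).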
+ N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
+ e = 65537
+
+ self.assertEqual(
+ ohdave_rsa_encrypt(b'aa111222', e, N),
+ '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
if __name__ == '__main__':
unittest.main()
textTag = a.find('TEXT')
text = textTag.text
self.assertTrue(text in expected) # assertIn only added in python 2.7
- # remove the first occurance, there could be more than one annotation with the same text
+ # remove the first occurrence, there could be more than one annotation with the same text
expected.remove(text)
# We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
ie = YoutubePlaylistIE(dl)
# TODO find a > 100 (paginating?) videos course
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
- entries = result['entries']
+ entries = list(result['entries'])
self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
self.assertEqual(len(entries), 25)
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
--- /dev/null
+{
+ "latest": "2013.01.06",
+ "signature": "72158cdba391628569ffdbea259afbcf279bbe3d8aeb7492690735dc1cfa6afa754f55c61196f3871d429599ab22f2667f1fec98865527b32632e7f4b3675a7ef0f0fbe084d359256ae4bba68f0d33854e531a70754712f244be71d4b92e664302aa99653ee4df19800d955b6c4149cd2b3f24288d6e4b40b16126e01f4c8ce6",
+ "versions": {
+ "2013.01.02": {
+ "bin": [
+ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl",
+ "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b"
+ ],
+ "exe": [
+ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe",
+ "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422"
+ ],
+ "tar": [
+ "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz",
+ "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196"
+ ]
+ },
+ "2013.01.06": {
+ "bin": [
+ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl",
+ "64b6ed8865735c6302e836d4d832577321b4519aa02640dc508580c1ee824049"
+ ],
+ "exe": [
+ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl.exe",
+ "58609baf91e4389d36e3ba586e21dab882daaaee537e4448b1265392ae86ff84"
+ ],
+ "tar": [
+ "http://youtube-dl.org/downloads/2013.01.06/youtube-dl-2013.01.06.tar.gz",
+ "fe77ab20a95d980ed17a659aa67e371fdd4d656d19c4c7950e7b720b0c2f1a86"
+ ]
+ }
+ }
+}
\ No newline at end of file
+.\" Automatically generated by Pandoc 1.16.0.2
+.\"
.TH "YOUTUBE\-DL" "1" "" "" ""
+.hy
.SH NAME
.PP
youtube\-dl \- download videos from youtube.com or other video platforms
.RS
.RE
.TP
+.B \-\-hls\-use\-mpegts
+Use the mpegts container for HLS videos, allowing you to play the video
+while downloading (some players may not be able to play it)
+.RS
+.RE
+.TP
.B \-\-external\-downloader \f[I]COMMAND\f[]
Use the specified external downloader.
Currently supports aria2c,axel,curl,httpie,wget
.RE
.TP
.B \-F, \-\-list\-formats
-List all available formats of specified videos
+List all available formats of requested videos
.RS
.RE
.TP
.TP
.B \-\-sub\-lang \f[I]LANGS\f[]
Languages of the subtitles to download (optional) separated by commas,
-use IETF language tags like \[aq]en,pt\[aq]
+use \-\-list\-subs for available language tags
.RS
.RE
.SS Authentication Options:
.RS
.RE
.TP
-.B \-\-convert\-subtitles \f[I]FORMAT\f[]
+.B \-\-convert\-subs \f[I]FORMAT\f[]
Convert the subtitles to other format (currently supported: srt|ass|vtt)
.RS
.RE
followed by a lowercase S.
Allowed names are:
.IP \[bu] 2
-\f[C]id\f[]: The sequence will be replaced by the video identifier.
+\f[C]id\f[]: Video identifier
+.IP \[bu] 2
+\f[C]title\f[]: Video title
+.IP \[bu] 2
+\f[C]url\f[]: Video URL
+.IP \[bu] 2
+\f[C]ext\f[]: Video filename extension
+.IP \[bu] 2
+\f[C]alt_title\f[]: A secondary title of the video
+.IP \[bu] 2
+\f[C]display_id\f[]: An alternative identifier for the video
+.IP \[bu] 2
+\f[C]uploader\f[]: Full name of the video uploader
+.IP \[bu] 2
+\f[C]creator\f[]: The main artist who created the video
+.IP \[bu] 2
+\f[C]release_date\f[]: The date (YYYYMMDD) when the video was released
+.IP \[bu] 2
+\f[C]timestamp\f[]: UNIX timestamp of the moment the video became
+available
+.IP \[bu] 2
+\f[C]upload_date\f[]: Video upload date (YYYYMMDD)
+.IP \[bu] 2
+\f[C]uploader_id\f[]: Nickname or id of the video uploader
+.IP \[bu] 2
+\f[C]location\f[]: Physical location where the video was filmed
+.IP \[bu] 2
+\f[C]duration\f[]: Length of the video in seconds
+.IP \[bu] 2
+\f[C]view_count\f[]: How many users have watched the video on the
+platform
+.IP \[bu] 2
+\f[C]like_count\f[]: Number of positive ratings of the video
+.IP \[bu] 2
+\f[C]dislike_count\f[]: Number of negative ratings of the video
+.IP \[bu] 2
+\f[C]repost_count\f[]: Number of reposts of the video
+.IP \[bu] 2
+\f[C]average_rating\f[]: Average rating given by users, the scale used
+depends on the webpage
+.IP \[bu] 2
+\f[C]comment_count\f[]: Number of comments on the video
+.IP \[bu] 2
+\f[C]age_limit\f[]: Age restriction for the video (years)
+.IP \[bu] 2
+\f[C]format\f[]: A human\-readable description of the format
+.IP \[bu] 2
+\f[C]format_id\f[]: Format code specified by \f[C]\-\-format\f[]
+.IP \[bu] 2
+\f[C]format_note\f[]: Additional info about the format
+.IP \[bu] 2
+\f[C]width\f[]: Width of the video
+.IP \[bu] 2
+\f[C]height\f[]: Height of the video
+.IP \[bu] 2
+\f[C]resolution\f[]: Textual description of width and height
+.IP \[bu] 2
+\f[C]tbr\f[]: Average bitrate of audio and video in KBit/s
+.IP \[bu] 2
+\f[C]abr\f[]: Average audio bitrate in KBit/s
+.IP \[bu] 2
+\f[C]acodec\f[]: Name of the audio codec in use
.IP \[bu] 2
-\f[C]url\f[]: The sequence will be replaced by the video URL.
+\f[C]asr\f[]: Audio sampling rate in Hertz
.IP \[bu] 2
-\f[C]uploader\f[]: The sequence will be replaced by the nickname of the
-person who uploaded the video.
+\f[C]vbr\f[]: Average video bitrate in KBit/s
.IP \[bu] 2
-\f[C]upload_date\f[]: The sequence will be replaced by the upload date
-in YYYYMMDD format.
+\f[C]fps\f[]: Frame rate
.IP \[bu] 2
-\f[C]title\f[]: The sequence will be replaced by the video title.
+\f[C]vcodec\f[]: Name of the video codec in use
.IP \[bu] 2
-\f[C]ext\f[]: The sequence will be replaced by the appropriate extension
-(like flv or mp4).
+\f[C]container\f[]: Name of the container format
.IP \[bu] 2
-\f[C]epoch\f[]: The sequence will be replaced by the Unix epoch when
-creating the file.
+\f[C]filesize\f[]: The number of bytes, if known in advance
.IP \[bu] 2
-\f[C]autonumber\f[]: The sequence will be replaced by a five\-digit
-number that will be increased with each download, starting at zero.
+\f[C]filesize_approx\f[]: An estimate for the number of bytes
.IP \[bu] 2
-\f[C]playlist\f[]: The sequence will be replaced by the name or the id
-of the playlist that contains the video.
+\f[C]protocol\f[]: The protocol that will be used for the actual
+download
.IP \[bu] 2
-\f[C]playlist_index\f[]: The sequence will be replaced by the index of
-the video in the playlist padded with leading zeros according to the
-total length of the playlist.
+\f[C]extractor\f[]: Name of the extractor
.IP \[bu] 2
-\f[C]format_id\f[]: The sequence will be replaced by the format code
-specified by \f[C]\-\-format\f[].
+\f[C]extractor_key\f[]: Key name of the extractor
.IP \[bu] 2
-\f[C]duration\f[]: The sequence will be replaced by the length of the
-video in seconds.
+\f[C]epoch\f[]: Unix epoch when creating the file
+.IP \[bu] 2
+\f[C]autonumber\f[]: Five\-digit number that will be increased with each
+download, starting at zero
+.IP \[bu] 2
+\f[C]playlist\f[]: Name or id of the playlist that contains the video
+.IP \[bu] 2
+\f[C]playlist_index\f[]: Index of the video in the playlist padded with
+leading zeros according to the total length of the playlist
+.PP
+Available for the video that belongs to some logical chapter or section:
+.IP \[bu] 2
+\f[C]chapter\f[]: Name or title of the chapter the video belongs to
+.IP \[bu] 2
+\f[C]chapter_number\f[]: Number of the chapter the video belongs to
+.IP \[bu] 2
+\f[C]chapter_id\f[]: Id of the chapter the video belongs to
+.PP
+Available for the video that is an episode of some series or programme:
+.IP \[bu] 2
+\f[C]series\f[]: Title of the series or programme the video episode
+belongs to
+.IP \[bu] 2
+\f[C]season\f[]: Title of the season the video episode belongs to
+.IP \[bu] 2
+\f[C]season_number\f[]: Number of the season the video episode belongs
+to
+.IP \[bu] 2
+\f[C]season_id\f[]: Id of the season the video episode belongs to
+.IP \[bu] 2
+\f[C]episode\f[]: Title of the video episode
+.IP \[bu] 2
+\f[C]episode_number\f[]: Number of the video episode within a season
+.IP \[bu] 2
+\f[C]episode_id\f[]: Id of the video episode
+.PP
+Each of the aforementioned sequences, when referenced in the output
+template, will be replaced by the actual value corresponding to the
+sequence name.
+Note that some of the sequences are not guaranteed to be present, since
+they depend on the metadata obtained by the particular extractor; such
+sequences will be replaced with \f[C]NA\f[].
+.PP
+For example, for \f[C]\-o\ %(title)s\-%(id)s.%(ext)s\f[] and an mp4
+video with title \f[C]youtube\-dl\ test\ video\f[] and id
+\f[C]BaW_jenozKcj\f[], this will result in a
+\f[C]youtube\-dl\ test\ video\-BaW_jenozKcj.mp4\f[] file created in the
+current directory.
+.PP
+The output template can also contain an arbitrary hierarchical path,
+e.g.
+\f[C]\-o\ \[aq]%(playlist)s/%(playlist_index)s\ \-\ %(title)s.%(ext)s\[aq]\f[],
+which will result in each video being downloaded into a directory
+corresponding to this path template.
+Any missing directories will be created automatically.
+.PP
+To specify a literal percent character in the output template, use
+\f[C]%%\f[].
+To output to stdout, use \f[C]\-o\ \-\f[].
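+.PP
+An output template is essentially a Python string\-formatting expression
+applied to the video\[aq]s metadata dictionary, so a template can be
+sanity\-checked by hand (hypothetical metadata values shown):
+.IP
+.nf
+\f[C]
+#\ Output\ templates\ expand\ with\ Python\[aq]s\ %\-style\ dict\ formatting
+info\ =\ {\[aq]title\[aq]:\ \[aq]youtube\-dl\ test\ video\[aq],
+\ \ \ \ \ \ \ \ \[aq]id\[aq]:\ \[aq]BaW_jenozKcj\[aq],\ \[aq]ext\[aq]:\ \[aq]mp4\[aq]}
+print(\[aq]%(title)s\-%(id)s.%(ext)s\[aq]\ %\ info)
+#\ \->\ youtube\-dl\ test\ video\-BaW_jenozKcj.mp4
+\f[]
+.fi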
.PP
The current default template is \f[C]%(title)s\-%(id)s.%(ext)s\f[].
.PP
system or the filename through an 8bit\-unsafe channel.
In these cases, add the \f[C]\-\-restrict\-filenames\f[] flag to get a
shorter title:
+.PP
+Examples (note that on Windows you may need to use double quotes
+instead of single quotes):
.IP
.nf
\f[C]
-$\ youtube\-dl\ \-\-get\-filename\ \-o\ "%(title)s.%(ext)s"\ BaW_jenozKc
+$\ youtube\-dl\ \-\-get\-filename\ \-o\ \[aq]%(title)s.%(ext)s\[aq]\ BaW_jenozKc
youtube\-dl\ test\ video\ \[aq]\[aq]_ä↭𝕐.mp4\ \ \ \ #\ All\ kinds\ of\ weird\ characters
-$\ youtube\-dl\ \-\-get\-filename\ \-o\ "%(title)s.%(ext)s"\ BaW_jenozKc\ \-\-restrict\-filenames
+
+$\ youtube\-dl\ \-\-get\-filename\ \-o\ \[aq]%(title)s.%(ext)s\[aq]\ BaW_jenozKc\ \-\-restrict\-filenames
youtube\-dl_test_video_.mp4\ \ \ \ \ \ \ \ \ \ #\ A\ simple\ file\ name
+
+#\ Download\ YouTube\ playlist\ videos\ in\ separate\ directory\ indexed\ by\ video\ order\ in\ a\ playlist
+$\ youtube\-dl\ \-o\ \[aq]%(playlist)s/%(playlist_index)s\ \-\ %(title)s.%(ext)s\[aq]\ https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
+
+#\ Download\ Udemy\ course\ keeping\ each\ chapter\ in\ separate\ directory\ under\ MyVideos\ directory\ in\ your\ home
+$\ youtube\-dl\ \-u\ user\ \-p\ password\ \-o\ \[aq]~/MyVideos/%(playlist)s/%(chapter_number)s\ \-\ %(chapter)s/%(title)s.%(ext)s\[aq]\ https://www.udemy.com/java\-tutorial/
+
+#\ Download\ entire\ series\ season\ keeping\ each\ series\ and\ each\ season\ in\ separate\ directory\ under\ C:/MyVideos
+$\ youtube\-dl\ \-o\ "C:/MyVideos/%(series)s/%(season_number)s\ \-\ %(season)s/%(episode_number)s\ \-\ %(episode)s.%(ext)s"\ http://videomore.ru/kino_v_detalayah/5_sezon/367617
+
+#\ Stream\ the\ video\ being\ downloaded\ to\ stdout
+$\ youtube\-dl\ \-o\ \-\ BaW_jenozKc
\f[]
.fi
.SH FORMAT SELECTION
.PP
-By default youtube\-dl tries to download the best quality, but sometimes
-you may want to download in a different format.
-The simplest case is requesting a specific format, for example
-\f[C]\-f\ 22\f[].
-You can get the list of available formats using
-\f[C]\-\-list\-formats\f[], you can also use a file extension (currently
-it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names
-\f[C]best\f[], \f[C]bestvideo\f[], \f[C]bestaudio\f[] and
-\f[C]worst\f[].
+By default youtube\-dl tries to download the best available quality,
+i.e.
+if you want the best quality you \f[B]don\[aq]t need\f[] to pass any
+special options; youtube\-dl will guess it for you by \f[B]default\f[].
+.PP
+But sometimes you may want to download in a different format, for
+example when you are on a slow or intermittent connection.
+The key mechanism for achieving this is the so\-called \f[I]format
+selection\f[], with which you can explicitly specify the desired format,
+select formats based on some criterion or criteria, set up precedence
+and much more.
+.PP
+The general syntax for format selection is \f[C]\-\-format\ FORMAT\f[]
+or, shorter, \f[C]\-f\ FORMAT\f[], where \f[C]FORMAT\f[] is a
+\f[I]selector expression\f[], i.e.
+an expression that describes the format or formats you would like to
+download.
+.PP
+The simplest case is requesting a specific format; for example, with
+\f[C]\-f\ 22\f[] you can download the format with format code equal to
+22.
+You can get the list of available format codes for a particular video
+using \f[C]\-\-list\-formats\f[] or \f[C]\-F\f[].
+Note that these format codes are extractor specific.
+.PP
+You can also use a file extension (currently \f[C]3gp\f[], \f[C]aac\f[],
+\f[C]flv\f[], \f[C]m4a\f[], \f[C]mp3\f[], \f[C]mp4\f[], \f[C]ogg\f[],
+\f[C]wav\f[] and \f[C]webm\f[] are supported) to download the best
+quality format with that extension served as a single file, e.g.
+\f[C]\-f\ webm\f[] will download the best quality format with the
+\f[C]webm\f[] extension served as a single file.
+.PP
+You can also use special names to select particular edge\-case formats:
+.IP \[bu] 2
+\f[C]best\f[]: Select the best quality format represented by a single
+file with video and audio
+.IP \[bu] 2
+\f[C]worst\f[]: Select the worst quality format represented by a single
+file with video and audio
+.IP \[bu] 2
+\f[C]bestvideo\f[]: Select the best quality video\-only format (e.g.
+DASH video); may not be available
+.IP \[bu] 2
+\f[C]worstvideo\f[]: Select the worst quality video\-only format; may
+not be available
+.IP \[bu] 2
+\f[C]bestaudio\f[]: Select the best quality audio\-only format; may not
+be available
+.IP \[bu] 2
+\f[C]worstaudio\f[]: Select the worst quality audio\-only format; may
+not be available
+.PP
+For example, to download the worst quality video\-only format you can
+use \f[C]\-f\ worstvideo\f[].
.PP
If you want to download multiple videos and they don\[aq]t have the same
formats available, you can specify the order of preference using
-slashes, as in \f[C]\-f\ 22/17/18\f[].
-You can also filter the video results by putting a condition in
+slashes.
+Note that the slash is left\-associative, i.e.
+formats on the left\-hand side are preferred; for example,
+\f[C]\-f\ 22/17/18\f[] will download format 22 if it\[aq]s available,
+otherwise format 17 if it\[aq]s available, otherwise format 18 if
+it\[aq]s available, and otherwise it will complain that no suitable
+formats are available for download.
+.PP
+If you want to download several formats of the same video, use a comma
+as a separator, e.g.
+\f[C]\-f\ 22,17,18\f[] will download all three of these formats,
+provided they are available.
+A more sophisticated example combines this with the precedence feature:
+\f[C]\-f\ 136/137/mp4/bestvideo,140/m4a/bestaudio\f[].
+.PP
+You can also filter the video formats by putting a condition in
brackets, as in \f[C]\-f\ "best[height=720]"\f[] (or
\f[C]\-f\ "[filesize>10M]"\f[]).
-This works for filesize, height, width, tbr, abr, vbr, asr, and fps and
-the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec,
-container, and protocol and the comparisons =, != .
+.PP
+The following numeric meta fields can be used with comparisons
+\f[C]<\f[], \f[C]<=\f[], \f[C]>\f[], \f[C]>=\f[], \f[C]=\f[] (equals),
+\f[C]!=\f[] (not equals):
+.IP \[bu] 2
+\f[C]filesize\f[]: The number of bytes, if known in advance
+.IP \[bu] 2
+\f[C]width\f[]: Width of the video, if known
+.IP \[bu] 2
+\f[C]height\f[]: Height of the video, if known
+.IP \[bu] 2
+\f[C]tbr\f[]: Average bitrate of audio and video in KBit/s
+.IP \[bu] 2
+\f[C]abr\f[]: Average audio bitrate in KBit/s
+.IP \[bu] 2
+\f[C]vbr\f[]: Average video bitrate in KBit/s
+.IP \[bu] 2
+\f[C]asr\f[]: Audio sampling rate in Hertz
+.IP \[bu] 2
+\f[C]fps\f[]: Frame rate
+.PP
+Filtering also works with the comparisons \f[C]=\f[] (equals),
+\f[C]!=\f[] (not equals), \f[C]^=\f[] (begins with), \f[C]$=\f[] (ends
+with), \f[C]*=\f[] (contains) and the following string meta fields:
+.IP \[bu] 2
+\f[C]ext\f[]: File extension
+.IP \[bu] 2
+\f[C]acodec\f[]: Name of the audio codec in use
+.IP \[bu] 2
+\f[C]vcodec\f[]: Name of the video codec in use
+.IP \[bu] 2
+\f[C]container\f[]: Name of the container format
+.IP \[bu] 2
+\f[C]protocol\f[]: The protocol that will be used for the actual
+download, lower\-case: \f[C]http\f[], \f[C]https\f[], \f[C]rtsp\f[],
+\f[C]rtmp\f[], \f[C]rtmpe\f[], \f[C]m3u8\f[], or \f[C]m3u8_native\f[]
+.PP
+Note that none of the aforementioned meta fields are guaranteed to be
+present, since this solely depends on the metadata obtained by the
+particular extractor, i.e.
+the metadata offered by the video hoster.
+.PP
Formats for which the value is not known are excluded unless you put a
-question mark (?) after the operator.
+question mark (\f[C]?\f[]) after the operator.
You can combine format filters, so
\f[C]\-f\ "[height\ <=?\ 720][tbr>500]"\f[] selects up to 720p videos
(or videos where the height is not known) with a bitrate of at least 500
KBit/s.
-Use commas to download multiple formats, such as
-\f[C]\-f\ 136/137/mp4/bestvideo,140/m4a/bestaudio\f[].
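+.PP
+Conceptually, a combined filter such as
+\f[C][height\ <=?\ 720][tbr>500]\f[] is a predicate evaluated against
+each format\[aq]s metadata; a rough Python sketch of that predicate
+(hypothetical helper, not the actual parser) would be:
+.IP
+.nf
+\f[C]
+def\ keep(fmt):
+\ \ \ \ #\ \[aq]<=?\[aq]:\ the\ trailing\ \[aq]?\[aq]\ lets\ formats\ with\ unknown\ height\ pass
+\ \ \ \ height_ok\ =\ fmt.get(\[aq]height\[aq])\ is\ None\ or\ fmt[\[aq]height\[aq]]\ <=\ 720
+\ \ \ \ #\ \[aq]>\[aq]\ without\ \[aq]?\[aq]:\ formats\ with\ unknown\ tbr\ are\ excluded
+\ \ \ \ tbr_ok\ =\ fmt.get(\[aq]tbr\[aq])\ is\ not\ None\ and\ fmt[\[aq]tbr\[aq]]\ >\ 500
+\ \ \ \ return\ height_ok\ and\ tbr_ok
+\f[]
+.fi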
+.PP
You can merge the video and audio of two formats into a single file
using \f[C]\-f\ <video\-format>+<audio\-format>\f[] (requires ffmpeg or
-avconv), for example \f[C]\-f\ bestvideo+bestaudio\f[].
+avconv to be installed); for example, \f[C]\-f\ bestvideo+bestaudio\f[]
+will download the best video\-only format and the best audio\-only
+format and mux them together with ffmpeg/avconv.
+.PP
Format selectors can also be grouped using parentheses, for example if
you want to download the best mp4 and webm formats with a height lower
than 480 you can use \f[C]\-f\ \[aq](mp4,webm)[height<480]\[aq]\f[].
\f[C]best\f[] is also needed for videos that don\[aq]t come from YouTube
because they don\[aq]t provide the audio and video in two different
files.
-If you want to only download some dash formats (for example if you are
+If you want to download only some DASH formats (for example if you are
not interested in getting videos with a resolution higher than 1080p),
you can add \f[C]\-f\ bestvideo[height<=?1080]+bestaudio/best\f[] to
your configuration file.
file, you should explicitly specify your choice with \f[C]\-f\ best\f[].
You may want to add it to the configuration file (#configuration) in
order not to type it every time you run youtube\-dl.
+.PP
+Examples (note that on Windows you may need to use double quotes
+instead of single quotes):
+.IP
+.nf
+\f[C]
+#\ Download\ best\ mp4\ format\ available\ or\ any\ other\ best\ format\ if\ no\ mp4\ is\ available
+$\ youtube\-dl\ \-f\ \[aq]bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best\[aq]
+
+#\ Download\ best\ format\ available\ but\ not\ better\ than\ 480p
+$\ youtube\-dl\ \-f\ \[aq]bestvideo[height<=480]+bestaudio/best[height<=480]\[aq]
+
+#\ Download\ best\ format\ available\ but\ no\ bigger\ than\ 50\ MB
+$\ youtube\-dl\ \-f\ \[aq]best[filesize<50M]\[aq]
+
+#\ Download\ best\ format\ available\ via\ direct\ link\ over\ HTTP/HTTPS\ protocol
+$\ youtube\-dl\ \-f\ \[aq](bestvideo+bestaudio/best)[protocol^=http]\[aq]
+\f[]
+.fi
.SH VIDEO SELECTION
.PP
Videos can be filtered by their upload date using the options
for example \f[C]\-o\ "/home/user/videos/%(title)s\-%(id)s.%(ext)s"\f[].
If you want this for all of your downloads, put the option into your
configuration file (#configuration).
-.SS How do I download a video starting with a \f[C]\-\f[] ?
+.SS How do I download a video starting with a \f[C]\-\f[]?
.PP
Either prepend \f[C]http://www.youtube.com/watch?v=\f[] or separate the
ID from the options with \f[C]\-\-\f[]:
.PP
Passing cookies to youtube\-dl is a good way to work around login when a
particular extractor does not implement it explicitly.
+Another use case is working around
+CAPTCHA (https://en.wikipedia.org/wiki/CAPTCHA) challenges that some
+websites require you to solve in particular cases in order to get access
+(e.g.
+YouTube, CloudFlare).
.SS Can you add support for this anime video site, or site which shows
current movies for free?
.PP
.PP
Most likely, you\[aq]ll want to use various options.
For a list of what can be done, have a look at
-youtube_dl/YoutubeDL.py (https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L117-L265).
+\f[C]youtube_dl/YoutubeDL.py\f[] (https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L121-L269).
For a start, if you want to intercept youtube\-dl\[aq]s output, set a
\f[C]logger\f[] object.
.PP
.SH BUGS
.PP
Bugs and suggestions should be reported at:
-<https://github.com/rg3/youtube-dl/issues> .
+<https://github.com/rg3/youtube-dl/issues>.
Unless you were prompted to do so or there is another pertinent reason
(e.g.
GitHub fails to accept the bug report), please do not send bug reports
via personal email.
-For discussions, join us in the irc channel #youtube\-dl on freenode.
+For discussions, join us in the IRC channel
+#youtube\-dl (irc://chat.freenode.net/#youtube-dl) on freenode
+(webchat (http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
.PP
\f[B]Please include the full output of youtube\-dl when run with
-\f[C]\-v\f[]\f[].
+\f[C]\-v\f[]\f[], i.e.
+\f[B]add\f[] the \f[C]\-v\f[] flag to \f[B]your command line\f[], copy
+the \f[B]whole\f[] output and post it in the issue body wrapped in ```
+for better formatting.
+It should look similar to this:
+.IP
+.nf
+\f[C]
+$\ youtube\-dl\ \-v\ <your\ command\ line>
+[debug]\ System\ config:\ []
+[debug]\ User\ config:\ []
+[debug]\ Command\-line\ args:\ [u\[aq]\-v\[aq],\ u\[aq]http://www.youtube.com/watch?v=BaW_jenozKcj\[aq]]
+[debug]\ Encodings:\ locale\ cp1251,\ fs\ mbcs,\ out\ cp866,\ pref\ cp1251
+[debug]\ youtube\-dl\ version\ 2015.12.06
+[debug]\ Git\ HEAD:\ 135392e
+[debug]\ Python\ version\ 2.6.6\ \-\ Windows\-2003Server\-5.2.3790\-SP2
+[debug]\ exe\ versions:\ ffmpeg\ N\-75573\-g1d0487f,\ ffprobe\ N\-75573\-g1d0487f,\ rtmpdump\ 2.4
+[debug]\ Proxy\ map:\ {}
+\&...
+\f[]
+.fi
+.PP
+\f[B]Do not post screenshots of verbose log; only plain text is
+acceptable.\f[]
.PP
The output (including the first lines) contains important debugging
information.
some of these, which makes it hard for us to respond to it.
We\[aq]re often too polite to close the issue outright, but the missing
info makes misinterpretation likely.
-As a commiter myself, I often get frustrated by these issues, since the
+As a committer myself, I often get frustrated by these issues, since the
only possible way for me to move forward on them is to ask for
clarification over and over.
.PP
.PP
\f[B]Site support requests must contain an example URL\f[].
An example URL is a URL you might want to download, like
-http://www.youtube.com/watch?v=BaW_jenozKc .
+\f[C]http://www.youtube.com/watch?v=BaW_jenozKc\f[].
There should be an obvious video present.
Except under very special circumstances, the main page of a video
service (e.g.
-http://www.youtube.com/ ) is \f[I]not\f[] an example URL.
+\f[C]http://www.youtube.com/\f[]) is \f[I]not\f[] an example URL.
.SS Are you using the latest version?
.PP
Before reporting any issue, type \f[C]youtube\-dl\ \-U\f[].
.PP
Make sure that someone has not already opened the issue you\[aq]re
trying to open.
-Search at the top of the window or at
-https://github.com/rg3/youtube\-dl/search?type=Issues .
+Search at the top of the window or browse the GitHub
+Issues (https://github.com/rg3/youtube-dl/search?type=Issues) of this
+repository.
If there is an issue, feel free to write something along the lines of
"This affects me as well, with version 2015.01.01.
Here is some more information on the issue: ...".
.PP
youtube\-dl is released into the public domain by the copyright holders.
.PP
-This README file was originally written by Daniel Bolton
-(<https://github.com/dbbolton>) and is likewise released into the public
-domain.
+This README file was originally written by Daniel
+Bolton (https://github.com/dbbolton) and is likewise released into the
+public domain.
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
- opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --force-generic-extractor --default-search --ignore-config --flat-playlist --no-color --proxy --socket-timeout --source-address --force-ipv4 --force-ipv6 --cn-verification-proxy --playlist-start --playlist-end --playlist-items --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --match-filter --no-playlist --yes-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --playlist-reverse --xattr-set-filesize --hls-prefer-native --external-downloader --external-downloader-args --batch-file --id --output --autonumber-size --restrict-filenames --auto-number --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --write-thumbnail --write-all-thumbnails --list-thumbnails --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --print-json --newline --no-progress --console-title --verbose --dump-pages --write-pages --youtube-print-sig-code --print-traffic --call-home --no-call-home --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --sleep-interval --format --all-formats --prefer-free-formats --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --merge-output-format --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --postprocessor-args --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --metadata-from-title --xattrs --fixup --prefer-avconv --prefer-ffmpeg --ffmpeg-location --exec --convert-subtitles"
+ opts="--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --force-generic-extractor --default-search --ignore-config --flat-playlist --no-color --proxy --socket-timeout --source-address --force-ipv4 --force-ipv6 --cn-verification-proxy --playlist-start --playlist-end --playlist-items --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --match-filter --no-playlist --yes-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --playlist-reverse --xattr-set-filesize --hls-prefer-native --hls-use-mpegts --external-downloader --external-downloader-args --batch-file --id --output --autonumber-size --restrict-filenames --auto-number --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --write-thumbnail --write-all-thumbnails --list-thumbnails --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --print-json --newline --no-progress --console-title --verbose --dump-pages --write-pages --youtube-print-sig-code --print-traffic --call-home --no-call-home --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --sleep-interval --format --all-formats --prefer-free-formats --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --merge-output-format --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --postprocessor-args --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --metadata-from-title --xattrs --fixup --prefer-avconv --prefer-ffmpeg --ffmpeg-location --exec --convert-subs"
keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
fileopts="-a|--batch-file|--download-archive|--cookies|--load-info"
diropts="--cache-dir"
complete --command youtube-dl --long-option playlist-reverse --description 'Download playlist videos in reverse order'
complete --command youtube-dl --long-option xattr-set-filesize --description 'Set file xattribute ytdl.filesize with expected filesize (experimental)'
complete --command youtube-dl --long-option hls-prefer-native --description 'Use the native HLS downloader instead of ffmpeg (experimental)'
+complete --command youtube-dl --long-option hls-use-mpegts --description 'Use the mpegts container for HLS videos, allowing you to play the video while downloading (some players may not be able to play it)'
complete --command youtube-dl --long-option external-downloader --description 'Use the specified external downloader. Currently supports aria2c,axel,curl,httpie,wget'
complete --command youtube-dl --long-option external-downloader-args --description 'Give these arguments to the external downloader'
complete --command youtube-dl --long-option batch-file --short-option a --description 'File containing URLs to download ('"'"'-'"'"' for stdin)' --require-parameter
complete --command youtube-dl --long-option format --short-option f --description 'Video format code, see the "FORMAT SELECTION" for all the info'
complete --command youtube-dl --long-option all-formats --description 'Download all available video formats'
complete --command youtube-dl --long-option prefer-free-formats --description 'Prefer free video formats unless a specific one is requested'
-complete --command youtube-dl --long-option list-formats --short-option F --description 'List all available formats of specified videos'
+complete --command youtube-dl --long-option list-formats --short-option F --description 'List all available formats of requested videos'
complete --command youtube-dl --long-option youtube-include-dash-manifest
complete --command youtube-dl --long-option youtube-skip-dash-manifest --description 'Do not download the DASH manifests and related data on YouTube videos'
complete --command youtube-dl --long-option merge-output-format --description 'If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no merge is required'
complete --command youtube-dl --long-option all-subs --description 'Download all the available subtitles of the video'
complete --command youtube-dl --long-option list-subs --description 'List all available subtitles for the video'
complete --command youtube-dl --long-option sub-format --description 'Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"'
-complete --command youtube-dl --long-option sub-lang --description 'Languages of the subtitles to download (optional) separated by commas, use IETF language tags like '"'"'en,pt'"'"''
+complete --command youtube-dl --long-option sub-lang --description 'Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags'
complete --command youtube-dl --long-option username --short-option u --description 'Login with this account ID'
complete --command youtube-dl --long-option password --short-option p --description 'Account password. If this option is left out, youtube-dl will ask interactively.'
complete --command youtube-dl --long-option twofactor --short-option 2 --description 'Two-factor auth code'
complete --command youtube-dl --long-option prefer-ffmpeg --description 'Prefer ffmpeg over avconv for running the postprocessors'
complete --command youtube-dl --long-option ffmpeg-location --description 'Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.'
complete --command youtube-dl --long-option exec --description 'Execute a command on the file after downloading, similar to find'"'"'s -exec syntax. Example: --exec '"'"'adb push {} /sdcard/Music/ && rm {}'"'"''
-complete --command youtube-dl --long-option convert-subtitles --description 'Convert the subtitles to other format (currently supported: srt|ass|vtt)'
+complete --command youtube-dl --long-option convert-subs --description 'Convert the subtitles to other format (currently supported: srt|ass|vtt)'
complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
elif [[ ${prev} == "--recode-video" ]]; then
_arguments '*: :(mp4 flv ogg webm mkv)'
else
- _arguments '*: :(--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --force-generic-extractor --default-search --ignore-config --flat-playlist --no-color --proxy --socket-timeout --source-address --force-ipv4 --force-ipv6 --cn-verification-proxy --playlist-start --playlist-end --playlist-items --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --match-filter --no-playlist --yes-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --playlist-reverse --xattr-set-filesize --hls-prefer-native --external-downloader --external-downloader-args --batch-file --id --output --autonumber-size --restrict-filenames --auto-number --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --write-thumbnail --write-all-thumbnails --list-thumbnails --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --print-json --newline --no-progress --console-title --verbose --dump-pages --write-pages --youtube-print-sig-code --print-traffic --call-home --no-call-home --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --sleep-interval --format --all-formats --prefer-free-formats --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --merge-output-format --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --postprocessor-args --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --metadata-from-title --xattrs --fixup --prefer-avconv --prefer-ffmpeg --ffmpeg-location --exec --convert-subtitles)'
+ _arguments '*: :(--help --version --update --ignore-errors --abort-on-error --dump-user-agent --list-extractors --extractor-descriptions --force-generic-extractor --default-search --ignore-config --flat-playlist --no-color --proxy --socket-timeout --source-address --force-ipv4 --force-ipv6 --cn-verification-proxy --playlist-start --playlist-end --playlist-items --match-title --reject-title --max-downloads --min-filesize --max-filesize --date --datebefore --dateafter --min-views --max-views --match-filter --no-playlist --yes-playlist --age-limit --download-archive --include-ads --rate-limit --retries --buffer-size --no-resize-buffer --test --playlist-reverse --xattr-set-filesize --hls-prefer-native --hls-use-mpegts --external-downloader --external-downloader-args --batch-file --id --output --autonumber-size --restrict-filenames --auto-number --title --literal --no-overwrites --continue --no-continue --no-part --no-mtime --write-description --write-info-json --write-annotations --load-info --cookies --cache-dir --no-cache-dir --rm-cache-dir --write-thumbnail --write-all-thumbnails --list-thumbnails --quiet --no-warnings --simulate --skip-download --get-url --get-title --get-id --get-thumbnail --get-description --get-duration --get-filename --get-format --dump-json --dump-single-json --print-json --newline --no-progress --console-title --verbose --dump-pages --write-pages --youtube-print-sig-code --print-traffic --call-home --no-call-home --encoding --no-check-certificate --prefer-insecure --user-agent --referer --add-header --bidi-workaround --sleep-interval --format --all-formats --prefer-free-formats --list-formats --youtube-include-dash-manifest --youtube-skip-dash-manifest --merge-output-format --write-sub --write-auto-sub --all-subs --list-subs --sub-format --sub-lang --username --password --twofactor --netrc --video-password --extract-audio --audio-format --audio-quality --recode-video --postprocessor-args --keep-video --no-post-overwrites --embed-subs --embed-thumbnail --add-metadata --metadata-from-title --xattrs --fixup --prefer-avconv --prefer-ffmpeg --ffmpeg-location --exec --convert-subs)'
fi
;;
esac
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
+ determine_protocol,
DownloadError,
+ encode_compat_str,
encodeFilename,
+ error_to_compat_str,
ExtractorError,
format_bytes,
formatSeconds,
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
- xattr_set_filesize, external_downloader_args.
+ xattr_set_filesize, external_downloader_args, hls_use_mpegts.
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
- tb += compat_str(traceback.format_exc())
+ tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
- date = info_dict.get('upload_date', None)
+ date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
- view_count = info_dict.get('view_count', None)
+ view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
- except ExtractorError as de: # An error we somewhat expected
- self.report_error(compat_str(de), de.format_traceback())
+ except ExtractorError as e: # An error we somewhat expected
+ self.report_error(compat_str(e), e.format_traceback())
break
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
- self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
+ self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
break
else:
raise
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
-
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
- for f in ('_type', 'url'):
+ for f in ('_type', 'url', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result, download=download, extra_info=extra_info)
elif result_type == 'playlist' or result_type == 'multi_video':
# We process each entry in the playlist
- playlist = ie_result.get('title', None) or ie_result.get('id', None)
+ playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
- playlistend = self.params.get('playlistend', None)
+ playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
- playlistitems_str = self.params.get('playlist_items', None)
+ playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+ '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
playliststart, playlistend)
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
else: # iterable
if playlistitems:
ie_entries, playliststart, playlistend))
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
if self.params.get('playlistreverse', False):
STR_OPERATORS = {
'=': operator.eq,
'!=': operator.ne,
+ '^=': lambda attr, value: attr.startswith(value),
+ '$=': lambda attr, value: attr.endswith(value),
+ '*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
- \s*(?P<value>[a-zA-Z0-9_-]+)
+ \s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
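The three added operators extend string-field filtering from plain (in)equality to prefix, suffix and substring matches. A minimal sketch of how they evaluate, using a hypothetical format dict (values are illustrative, not from the patch):

fmt = {'vcodec': 'avc1.64001f', 'protocol': 'm3u8_native'}
STR_OPERATORS['^='](fmt['vcodec'], 'avc1')      # True -- as selected by -f "[vcodec^=avc1]"
STR_OPERATORS['$='](fmt['protocol'], 'native')  # True -- as selected by -f "[protocol$=native]"
STR_OPERATORS['*='](fmt['protocol'], 'm3u8')    # True -- as selected by -f "[protocol*=m3u8]"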
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
+ # Formats must be opposite (video+audio)
+ if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
+ self.report_error(
+ 'Both formats %s and %s are video-only, you must specify "-f video+audio"'
+ % (format_1, format_2))
+ return
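Illustrative effect of the new guard, assuming standard YouTube itags: '-f 137+136' pairs two video-only DASH formats and now fails fast with the error above, while '-f 137+140' (1080p video plus m4a audio) still merges.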
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
except (ValueError, OverflowError, OSError):
pass
+ # Auto-generate title fields corresponding to the *_number fields when missing
+ # in order to always have clean titles. This is very common for TV series.
+ for field in ('chapter', 'season', 'episode'):
+ if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+ info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
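Sketch of the effect: an info_dict carrying {'episode_number': 3} but no 'episode' key gains 'episode': 'Episode 3' (i.e. '%s %d' % ('episode'.capitalize(), 3)); fields that are already set are left untouched.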
subtitles = info_dict.get('subtitles')
if subtitles:
for _, subtitle in subtitles.items():
if format.get('format_id') is None:
format['format_id'] = compat_str(i)
+ else:
+ # Sanitize format_id from characters used in format selector expressions
+ format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
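For example, a hypothetical extractor-supplied format_id like 'hls,720p/high' becomes 'hls_720p_high' here, so commas, slashes and parentheses in IDs can no longer be mistaken for -f selector syntax.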
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
# Automatically determine file extension if missing
if 'ext' not in format:
format['ext'] = determine_ext(format['url']).lower()
+ # Automatically determine protocol if missing (useful for format
+ # selection purposes)
+ if 'protocol' not in format:
+ format['protocol'] = determine_protocol(format)
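Sketch of what this relies on, assuming determine_protocol's usual fallbacks (explicit 'protocol' key, then URL scheme/extension): a format whose url ends in '.m3u8' with no explicit 'protocol' is assigned 'm3u8' here, which a selector like -f "[protocol^=m3u8]" can then match.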
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
# only set the 'formats' field if the original info_dict lists it;
# otherwise we end up with a circular reference: the first (and only)
# element in the 'formats' field in info_dict is info_dict itself,
- # wich can't be exported to json
+ # which can't be exported to json
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
if req_format is None:
req_format_list = []
if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
- info_dict['extractor'] in ['youtube', 'ted'] and
not info_dict.get('is_live')):
merger = FFmpegMergerPP(self)
if merger.available and merger.can_merge():
if dn and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError) as err:
- self.report_error('unable to create directory ' + compat_str(err))
+ self.report_error('unable to create directory ' + error_to_compat_str(err))
return
if self.params.get('writedescription', False):
sub_info['url'], info_dict['id'], note=False)
except ExtractorError as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
- (sub_lang, compat_str(err.cause)))
+ (sub_lang, error_to_compat_str(err.cause)))
continue
try:
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
- res = '?x%d' % format['width']
+ res = '%dx?' % format['width']
else:
res = default
return res
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
+ if fdict.get('language'):
+ if res:
+ res += ' '
+ res += '[%s]' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
data_handler = compat_urllib_request_DataHandler()
+
+ # When we pass our own FileHandler instance, build_opener won't add the
+ # default FileHandler, which lets us disable the file:// protocol; it
+ # could otherwise be abused for malicious purposes (see
+ # https://github.com/rg3/youtube-dl/issues/8227)
+ file_handler = compat_urllib_request.FileHandler()
+
+ def file_open(*args, **kwargs):
+ raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
+ file_handler.file_open = file_open
+
opener = compat_urllib_request.build_opener(
- proxy_handler, https_handler, cookie_processor, ydlh, data_handler)
+ proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
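A minimal sketch of the intended effect (hypothetical snippet, not part of the patch):

try:
    opener.open('file:///etc/passwd')
except compat_urllib_error.URLError:
    pass  # raised by the stubbed file_open instead of reading the local file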
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
- (t['url'], compat_str(err)))
+ (t['url'], error_to_compat_str(err)))
'no_color': opts.no_color,
'ffmpeg_location': opts.ffmpeg_location,
'hls_prefer_native': opts.hls_prefer_native,
+ 'hls_use_mpegts': opts.hls_use_mpegts,
'external_downloader_args': external_downloader_args,
'postprocessor_args': postprocessor_args,
'cn_verification_proxy': opts.cn_verification_proxy,
import sys
-if __package__ is None and not hasattr(sys, "frozen"):
+if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
nonce = data[:NONCE_LENGTH_BYTES]
cipher = data[NONCE_LENGTH_BYTES:]
- class Counter:
+ class Counter(object):
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
def next_value(self):
# parameter := attribute "=" value
url = req.get_full_url()
- scheme, data = url.split(":", 1)
- mediatype, data = data.split(",", 1)
+ scheme, data = url.split(':', 1)
+ mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
- if mediatype.endswith(";base64"):
+ if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
- mediatype = "text/plain;charset=US-ASCII"
+ mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
- "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+ 'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
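Example of the handler at work: opening 'data:text/plain;base64,SGVsbG8=' yields a response whose read() returns b'Hello', with Content-type 'text/plain' and Content-length 5.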
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
- raise ValueError("bad query field: %r" % (name_value,))
+ raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
compat_getpass = getpass.getpass
-# Old 2.6 and 2.7 releases require kwargs to be bytes
+# Python < 2.6.5 requires kwargs to be bytes
try:
def _testfunc(x):
pass
if err is not None:
raise err
else:
- raise socket.error("getaddrinfo returns an empty list")
+ raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
import sys
import time
-from ..compat import compat_str
from ..utils import (
encodeFilename,
+ error_to_compat_str,
decodeArgument,
format_bytes,
timeconvert,
(experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
+ hls_use_mpegts: Use the mpegts container for HLS videos.
Subclasses of this one must re-define the real_download method.
"""
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
- rate_limit = self.params.get('ratelimit', None)
+ rate_limit = self.params.get('ratelimit')
if rate_limit is None or byte_counter == 0:
return
if now is None:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
- self.report_error('unable to rename file: %s' % compat_str(err))
+ self.report_error('unable to rename file: %s' % error_to_compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
- self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+ self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %.0f)...' % (count, retries))
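A note on the format change: unlike '%d', '%.0f' also accepts float('inf') ('%d' % float('inf') raises OverflowError), so an unbounded retry count is reported as 'inf' rather than crashing.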
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
from __future__ import unicode_literals
+import os
import re
-from .common import FileDownloader
-from ..utils import sanitized_Request
+from .fragment import FragmentFD
+from ..utils import (
+ sanitize_open,
+ encodeFilename,
+)
-class DashSegmentsFD(FileDownloader):
+class DashSegmentsFD(FragmentFD):
"""
Download segments in a DASH manifest
"""
- def real_download(self, filename, info_dict):
- self.report_destination(filename)
- tmpfilename = self.temp_name(filename)
- base_url = info_dict['url']
- segment_urls = info_dict['segment_urls']
-
- is_test = self.params.get('test', False)
- remaining_bytes = self._TEST_FILE_SIZE if is_test else None
- byte_counter = 0
- def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
- self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
- req = sanitized_Request(target_url)
- if remaining_bytes is not None:
- req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+ FD_NAME = 'dashsegments'
- data = self.ydl.urlopen(req).read()
+ def real_download(self, filename, info_dict):
+ base_url = info_dict['url']
+ segment_urls = [info_dict['segment_urls'][0]] if self.params.get('test', False) else info_dict['segment_urls']
+ initialization_url = info_dict.get('initialization_url')
- if remaining_bytes is not None:
- data = data[:remaining_bytes]
+ ctx = {
+ 'filename': filename,
+ 'total_frags': len(segment_urls) + (1 if initialization_url else 0),
+ }
- outf.write(data)
- return len(data)
+ self._prepare_and_start_frag_download(ctx)
def combine_url(base_url, target_url):
if re.match(r'^https?://', target_url):
return target_url
return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
- with open(tmpfilename, 'wb') as outf:
- append_url_to_file(
- outf, combine_url(base_url, info_dict['initialization_url']),
- 'initialization segment')
- for i, segment_url in enumerate(segment_urls):
- segment_len = append_url_to_file(
- outf, combine_url(base_url, segment_url),
- 'segment %d / %d' % (i + 1, len(segment_urls)),
- remaining_bytes)
- byte_counter += segment_len
- if remaining_bytes is not None:
- remaining_bytes -= segment_len
- if remaining_bytes <= 0:
- break
+ segments_filenames = []
- self.try_rename(tmpfilename, filename)
+ def append_url_to_file(target_url, target_filename):
+ success = ctx['dl'].download(target_filename, {'url': combine_url(base_url, target_url)})
+ if not success:
+ return False
+ down, target_sanitized = sanitize_open(target_filename, 'rb')
+ ctx['dest_stream'].write(down.read())
+ down.close()
+ segments_filenames.append(target_sanitized)
- self._hook_progress({
- 'downloaded_bytes': byte_counter,
- 'total_bytes': byte_counter,
- 'filename': filename,
- 'status': 'finished',
- })
+ if initialization_url:
+ append_url_to_file(initialization_url, ctx['tmpfilename'] + '-Init')
+ for i, segment_url in enumerate(segment_urls):
+ segment_filename = '%s-Seg%d' % (ctx['tmpfilename'], i)
+ append_url_to_file(segment_url, segment_filename)
+
+ self._finish_frag_download(ctx)
+
+ for segment_file in segments_filenames:
+ os.remove(encodeFilename(segment_file))
return True
)
from ..utils import (
encodeFilename,
+ fix_xml_ampersands,
sanitize_open,
struct_pack,
struct_unpack,
return fragments_list
def _parse_bootstrap_node(self, node, base_url):
- if node.text is None:
+ # Sometimes non-empty inline bootstrap info can be specified along
+ # with bootstrap url attribute (e.g. dummy inline bootstrap info
+ # contains whitespace characters in [1]). We will prefer bootstrap
+ # url over inline bootstrap info when present.
+ # 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
+ bootstrap_url = node.get('url')
+ if bootstrap_url:
bootstrap_url = compat_urlparse.urljoin(
- base_url, node.attrib['url'])
+ base_url, bootstrap_url)
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text.encode('ascii'))
boot_info = read_bootstrap_info(bootstrap)
- return (boot_info, bootstrap_url)
+ return boot_info, bootstrap_url
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
urlh = self.ydl.urlopen(man_url)
man_url = urlh.geturl()
- manifest = urlh.read()
+ # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
+ # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
+ # and https://github.com/rg3/youtube-dl/issues/7823)
+ manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
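fix_xml_ampersands escapes stray '&' characters so the manifest parses; roughly, 'a&b &amp; c' becomes 'a&amp;b &amp; c', with already-escaped entities left untouched.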
doc = compat_etree_fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
metadata = None
fragments_list = build_fragments_list(boot_info)
- if self.params.get('test', False):
+ test = self.params.get('test', False)
+ if test:
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
ctx = {
'filename': filename,
'total_frags': total_frags,
+ 'live': live,
}
self._prepare_frag_download(ctx)
else:
raise
- if not fragments_list and live and bootstrap_url:
+ if not fragments_list and not test and live and bootstrap_url:
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
total_frags += len(fragments_list)
if fragments_list and (fragments_list[0][1] > frag_i + 1):
self._start_frag_download(ctx)
def _prepare_frag_download(self, ctx):
- self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
+ if 'live' not in ctx:
+ ctx['live'] = False
+ self.to_screen(
+ '[%s] Total fragments: %s'
+ % (self.FD_NAME, ctx['total_frags'] if not ctx['live'] else 'unknown (live)'))
self.report_destination(ctx['filename'])
dl = HttpQuietDownloader(
self.ydl,
'continuedl': True,
'quiet': True,
'noprogress': True,
- 'ratelimit': self.params.get('ratelimit', None),
+ 'ratelimit': self.params.get('ratelimit'),
'retries': self.params.get('retries', 0),
'test': self.params.get('test', False),
}
'filename': ctx['filename'],
'tmpfilename': ctx['tmpfilename'],
}
+
start = time.time()
- ctx['started'] = start
+ ctx.update({
+ 'started': start,
+ # Total complete fragments downloaded so far in bytes
+ 'complete_frags_downloaded_bytes': 0,
+ # Amount of fragment's bytes downloaded by the time of the previous
+ # frag progress hook invocation
+ 'prev_frag_downloaded_bytes': 0,
+ })
def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
return
- frag_total_bytes = s.get('total_bytes', 0)
- if s['status'] == 'finished':
- state['downloaded_bytes'] += frag_total_bytes
- state['frag_index'] += 1
-
- estimated_size = (
- (state['downloaded_bytes'] + frag_total_bytes) /
- (state['frag_index'] + 1) * total_frags)
time_now = time.time()
- state['total_bytes_estimate'] = estimated_size
state['elapsed'] = time_now - start
+ frag_total_bytes = s.get('total_bytes') or 0
+ if not ctx['live']:
+ estimated_size = (
+ (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes) /
+ (state['frag_index'] + 1) * total_frags)
+ state['total_bytes_estimate'] = estimated_size
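Worked example of the estimate: with 2 MiB across completed fragments, a 1 MiB current fragment, frag_index == 2 and total_frags == 10, estimated_size is (2 + 1) MiB / 3 * 10 = 10 MiB.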
if s['status'] == 'finished':
- progress = self.calc_percent(state['frag_index'], total_frags)
+ state['frag_index'] += 1
+ state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
+ ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
+ ctx['prev_frag_downloaded_bytes'] = 0
else:
frag_downloaded_bytes = s['downloaded_bytes']
- frag_progress = self.calc_percent(frag_downloaded_bytes,
- frag_total_bytes)
- progress = self.calc_percent(state['frag_index'], total_frags)
- progress += frag_progress / float(total_frags)
-
- state['eta'] = self.calc_eta(
- start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
+ state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
+ if not ctx['live']:
+ state['eta'] = self.calc_eta(
+ start, time_now, estimated_size,
+ state['downloaded_bytes'])
state['speed'] = s.get('speed')
+ ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
self._hook_progress(state)
ctx['dl'].add_progress_hook(frag_progress_hook)
import os
import re
import subprocess
+import sys
from .common import FileDownloader
from .fragment import FragmentFD
encodeArgument,
encodeFilename,
sanitize_open,
+ handle_youtubedl_headers,
)
if info_dict['http_headers'] and re.match(r'^https?://', url):
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+ headers = handle_youtubedl_headers(info_dict['http_headers'])
args += [
'-headers',
- ''.join('%s: %s\r\n' % (key, val) for key, val in info_dict['http_headers'].items() if key.lower() != 'accept-encoding')]
+ ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
- args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc']
+ args += ['-i', url, '-c', 'copy']
+ if self.params.get('hls_use_mpegts', False):
+ args += ['-f', 'mpegts']
+ else:
+ args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
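Illustratively, the assembled command line ends up as (binary name, headers and paths elided):

... -i <m3u8 url> -c copy -f mpegts <tmpfile>                    # with --hls-use-mpegts
... -i <m3u8 url> -c copy -f mp4 -bsf:a aac_adtstoasc <tmpfile>  # default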
args = [encodeArgument(opt) for opt in args]
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
self._debug_cmd(args)
- retval = subprocess.call(args)
+ proc = subprocess.Popen(args, stdin=subprocess.PIPE)
+ try:
+ retval = proc.wait()
+ except KeyboardInterrupt:
+ # subprocess.run would send the SIGKILL signal to ffmpeg and the
+ # mp4 file couldn't be played, but if we ask ffmpeg to quit it
+ # produces a file that is playable (this is mostly useful for live
+ # streams). Note that Windows is not affected and produces playable
+ # files (see https://github.com/rg3/youtube-dl/issues/8300).
+ if sys.platform != 'win32':
+ proc.communicate(b'q')
+ raise
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
if data_len is not None:
data_len = int(data_len) + resume_len
- min_data_len = self.params.get("min_filesize", None)
- max_data_len = self.params.get("max_filesize", None)
+ min_data_len = self.params.get('min_filesize')
+ max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
return proc.returncode
url = info_dict['url']
- player_url = info_dict.get('player_url', None)
- page_url = info_dict.get('page_url', None)
- app = info_dict.get('app', None)
- play_path = info_dict.get('play_path', None)
- tc_url = info_dict.get('tc_url', None)
- flash_version = info_dict.get('flash_version', None)
+ player_url = info_dict.get('player_url')
+ page_url = info_dict.get('page_url')
+ app = info_dict.get('app')
+ play_path = info_dict.get('play_path')
+ tc_url = info_dict.get('tc_url')
+ flash_version = info_dict.get('flash_version')
live = info_dict.get('rtmp_live', False)
- conn = info_dict.get('rtmp_conn', None)
- protocol = info_dict.get('rtmp_protocol', None)
+ conn = info_dict.get('rtmp_conn')
+ protocol = info_dict.get('rtmp_protocol')
real_time = info_dict.get('rtmp_real_time', False)
no_resume = info_dict.get('no_resume', False)
continue_dl = self.params.get('continuedl', True)
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
+from .acast import (
+ ACastIE,
+ ACastChannelIE,
+)
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
+ AdobeTVShowIE,
+ AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
-from .aftenposten import AftenpostenIE
+from .aenetworks import AENetworksIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
+from .animeondemand import AnimeOnDemandIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
-from .appletrailers import AppleTrailersIE
+from .appletrailers import (
+ AppleTrailersIE,
+ AppleTrailersSectionIE,
+)
from .archiveorg import ArchiveOrgIE
from .ard import (
ARDIE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
+ ArteTVCinemaIE,
ArteTVDDCIE,
+ ArteTVMagazineIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
+from .audimedia import AudiMediaIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
-from .azubu import AzubuIE
+from .azubu import AzubuIE, AzubuLiveIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
+from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import BiliBiliIE
+from .bleacherreport import (
+ BleacherReportIE,
+ BleacherReportCMSIE,
+)
from .blinkx import BlinkxIE
-from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
CamdemyIE,
CamdemyFolderIE
)
-from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
+from .canvas import CanvasIE
+from .cbc import (
+ CBCIE,
+ CBCPlayerIE,
+)
from .cbs import CBSIE
-from .cbsnews import CBSNewsIE
+from .cbsnews import (
+ CBSNewsIE,
+ CBSNewsLiveVideoIE,
+)
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
+from .crackle import CrackleIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
+from .cultureunplugged import CultureUnpluggedIE
+from .cwtv import CWTVIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
-from .daum import DaumIE
+from .daum import (
+ DaumIE,
+ DaumClipIE,
+ DaumPlaylistIE,
+ DaumUserIE,
+)
from .dbtv import DBTVIE
-from .dcn import DCNIE
+from .dcn import (
+ DCNIE,
+ DCNVideoIE,
+ DCNLiveIE,
+ DCNSeasonIE,
+)
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
-from .facebook import FacebookIE
+from .facebook import (
+ FacebookIE,
+ FacebookPostIE,
+)
from .faz import FazIE
from .fc2 import FC2IE
from .fczenit import FczenitIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
+from .fox import FOXIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
-from .franceculture import FranceCultureIE
+from .franceculture import (
+ FranceCultureIE,
+ FranceCultureEmissionIE,
+)
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
+from .funimation import FunimationIE
from .funnyordie import FunnyOrDieIE
+from .gameinformer import GameInformerIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
+from .googledrive import GoogleDriveIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .goshgay import GoshgayIE
+from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
-from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
+from .hotstar import HotStarIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
-from .ign import IGNIE, OneUPIE
+from .ign import (
+ IGNIE,
+ OneUPIE,
+ PCMagIE,
+)
from .imdb import (
ImdbIE,
ImdbListIE
IviIE,
IviCompilationIE
)
+from .ivideon import IvideonIE
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
-from .jukebox import JukeboxIE
+from .jwplatform import JWPlatformIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
+from .konserthusetplay import KonserthusetPlayIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
+from .lemonde import LemondeIE
from .letv import (
LetvIE,
LetvTvIE,
- LetvPlaylistIE
+ LetvPlaylistIE,
+ LetvCloudIE,
)
from .libsyn import LibsynIE
from .lifenews import (
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
+from .lovehomeporn import LoveHomePornIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
+from .makertv import MakerTVIE
from .malemotion import MalemotionIE
+from .matchtv import MatchTVIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
-from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
+from .nextmovie import NextMovieIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLNewsIE,
NHLVideocenterIE,
)
+from .nick import NickIE
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
-from .novamov import NovaMovIE
+from .novamov import (
+ NovaMovIE,
+ WholeCloudIE,
+ NowVideoIE,
+ VideoWeedIE,
+ CloudTimeIE,
+)
from .nowness import (
NownessIE,
NownessPlaylistIE,
NowTVIE,
NowTVListIE,
)
-from .nowvideo import NowVideoIE
+from .noz import NozIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
+ SchoolTVIE,
VPROIE,
WNLIE
)
+from .npr import NprIE
from .nrk import (
NRKIE,
NRKPlaylistIE,
OoyalaIE,
OoyalaExternalIE,
)
+from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
+from .pandoratv import PandoraTVIE
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
+from .plays import PlaysTVIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
+ PornHubUserVideosIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
-from .rai import RaiIE
+from .rai import (
+ RaiTVIE,
+ RaiIE,
+)
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
+from .regiotv import RegioTVIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
+from .revision3 import Revision3IE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
-from .rte import RteIE
+from .rte import RteIE, RteRadioIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
+from .ruleporn import RulePornIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
+from .screenjunkies import ScreenJunkiesIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
+from .skynewsarabia import (
+ SkyNewsArabiaIE,
+ SkyNewsArabiaArticleIE,
+)
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
)
from .snotr import SnotrIE
from .sohu import SohuIE
-from .soompi import (
- SoompiIE,
- SoompiShowIE,
-)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
-from .srf import SrfIE
+from .srgssr import (
+ SRGSSRIE,
+ SRGSSRPlayIE,
+)
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
+from .tele13 import Tele13IE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
-from .testtube import TestTubeIE
from .tf1 import TF1IE
+from .theintercept import TheInterceptIE
from .theonion import TheOnionIE
from .theplatform import (
ThePlatformIE,
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
-from .tlc import TlcIE, TlcDeIE
+from .tlc import TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
EMPFlixIE,
MovieFapIE,
)
+from .toggle import ToggleIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
+from .trollvids import TrollvidsIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
-from .tudou import TudouIE
+from .tudou import (
+ TudouIE,
+ TudouPlaylistIE,
+ TudouAlbumIE,
+)
from .tumblr import TumblrIE
-from .tunein import TuneInIE
+from .tunein import (
+ TuneInClipIE,
+ TuneInStationIE,
+ TuneInProgramIE,
+ TuneInTopicIE,
+ TuneInShortenerIE,
+)
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TVCArticleIE,
)
from .tvigle import TvigleIE
+from .tvland import TVLandIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
+from .twentymin import TwentyMinutenIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
TwitchBookmarksIE,
TwitchStreamIE,
)
-from .twitter import TwitterCardIE, TwitterIE
+from .twitter import (
+ TwitterCardIE,
+ TwitterIE,
+ TwitterAmplifyIE,
+)
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
-from .ultimedia import UltimediaIE
+from .digiteka import DigitekaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
+from .videomore import (
+ VideomoreIE,
+ VideomoreVideoIE,
+ VideomoreSeasonIE,
+)
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
-from .videoweed import VideoWeedIE
-from .vidme import VidmeIE
+from .vidme import (
+ VidmeIE,
+ VidmeUserIE,
+ VidmeUserLikesIE,
+)
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
+from .weiqitv import WeiqiTVIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
- YoutubeUserPlaylistsIE,
+ YoutubePlaylistsIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
ZingMp3SongIE,
ZingMp3AlbumIE,
)
+from .zippcast import ZippCastIE
_ALL_CLASSES = [
klass
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
+ 'skip': 'this video has expired',
}, {
'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
'md5': 'db2a5369238b51f9811ad815b69dc086',
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
},
'add_ie': ['Youtube'],
+ 'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
webpage)
if mobj is None:
+ expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
+ if expired:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
raise ExtractorError('Unable to extract video urls')
urls_info = self._parse_json(
'contentURL', webpage, 'm3u8 url', fatal=True)
formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
- self._sort_formats(formats)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import int_or_none
+
+
+class ACastIE(InfoExtractor):
+ IE_NAME = 'acast'
+ _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<channel>[^/]+)/(?P<id>[^/#?]+)'
+ _TEST = {
+ 'url': 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan',
+ 'md5': 'ada3de5a1e3a2a381327d749854788bb',
+ 'info_dict': {
+ 'id': '57de3baa-4bb0-487e-9418-2692c1277a34',
+ 'ext': 'mp3',
+ 'title': '"Where Are You?": Taipei 101, Taiwan',
+ 'timestamp': 1196172000000,
+ 'description': 'md5:a0b4ef3634e63866b542e5b1199a1a0e',
+ 'duration': 211,
+ }
+ }
+
+ def _real_extract(self, url):
+ channel, display_id = re.match(self._VALID_URL, url).groups()
+
+ embed_page = self._download_webpage(
+ re.sub(r'(?:www\.)?acast\.com', 'embedcdn.acast.com', url), display_id)
+ cast_data = self._parse_json(self._search_regex(
+ r'window\[\'acast/queries\'\]\s*=\s*([^;]+);', embed_page, 'acast data'),
+ display_id)['GetAcast/%s/%s' % (channel, display_id)]
+
+ return {
+ 'id': compat_str(cast_data['id']),
+ 'display_id': display_id,
+ 'url': cast_data['blings'][0]['audio'],
+ 'title': cast_data['name'],
+ 'description': cast_data.get('description'),
+ 'thumbnail': cast_data.get('image'),
+ 'timestamp': int_or_none(cast_data.get('publishingDate')),
+ 'duration': int_or_none(cast_data.get('duration')),
+ }
+
+
+class ACastChannelIE(InfoExtractor):
+ IE_NAME = 'acast:channel'
+ _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<id>[^/#?]+)'
+ _TEST = {
+ 'url': 'https://www.acast.com/condenasttraveler',
+ 'info_dict': {
+ 'id': '50544219-29bb-499e-a083-6087f4cb7797',
+ 'title': 'Condé Nast Traveler Podcast',
+ 'description': 'md5:98646dee22a5b386626ae31866638fbd',
+ },
+ 'playlist_mincount': 20,
+ }
+ _API_BASE_URL = 'https://www.acast.com/api/'
+
+ @classmethod
+ def suitable(cls, url):
+ return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
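Without this override a single-cast URL (two path segments) would also satisfy this class's looser _VALID_URL, since youtube-dl matches _VALID_URL with re.match, which anchors only at the start; individual casts are therefore deferred to ACastIE.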
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ channel_data = self._download_json(self._API_BASE_URL + 'channels/%s' % display_id, display_id)
+ casts = self._download_json(self._API_BASE_URL + 'channels/%s/acasts' % display_id, display_id)
+ entries = [self.url_result('https://www.acast.com/%s/%s' % (display_id, cast['url']), 'ACast') for cast in casts]
+
+ return self.playlist_result(entries, compat_str(channel_data['id']), channel_data['name'], channel_data.get('description'))
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
+from ..compat import compat_str
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
+ int_or_none,
float_or_none,
ISO639Utils,
+ determine_ext,
)
-class AdobeTVIE(InfoExtractor):
- _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
+class AdobeTVBaseIE(InfoExtractor):
+ _API_BASE_URL = 'http://tv.adobe.com/api/v4/'
+
+
+class AdobeTVIE(AdobeTVBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
- 'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
+ 'id': '10981',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
}
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- player = self._parse_json(
- self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
- video_id)
+ language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
- title = player.get('title') or self._search_regex(
- r'data-title="([^"]+)"', webpage, 'title')
- description = self._og_search_description(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
-
- upload_date = unified_strdate(
- self._html_search_meta('datepublished', webpage, 'upload date'))
-
- duration = parse_duration(
- self._html_search_meta('duration', webpage, 'duration') or
- self._search_regex(
- r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
- webpage, 'duration', fatal=False))
-
- view_count = str_to_int(self._search_regex(
- r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
- webpage, 'view count'))
+ video_data = self._download_json(
+ self._API_BASE_URL + 'episode/get/?language=%s&show_urlname=%s&urlname=%s&disclosure=standard' % (language, show_urlname, urlname),
+ urlname)['data'][0]
formats = [{
- 'url': source['src'],
- 'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
- 'tbr': source.get('bitrate'),
- } for source in player['sources']]
+ 'url': source['url'],
+ 'format_id': source.get('quality_level') or source['url'].split('-')[-1].split('.')[0] or None,
+ 'width': int_or_none(source.get('width')),
+ 'height': int_or_none(source.get('height')),
+ 'tbr': int_or_none(source.get('video_data_rate')),
+ } for source in video_data['videos']]
self._sort_formats(formats)
return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'duration': duration,
- 'view_count': view_count,
+ 'id': compat_str(video_data['id']),
+ 'title': video_data['title'],
+ 'description': video_data.get('description'),
+ 'thumbnail': video_data.get('thumbnail'),
+ 'upload_date': unified_strdate(video_data.get('start_date')),
+ 'duration': parse_duration(video_data.get('duration')),
+ 'view_count': str_to_int(video_data.get('playcount')),
'formats': formats,
}
+class AdobeTVPlaylistBaseIE(AdobeTVBaseIE):
+ def _parse_page_data(self, page_data):
+ return [self.url_result(self._get_element_url(element_data)) for element_data in page_data]
+
+ def _extract_playlist_entries(self, url, display_id):
+ page = self._download_json(url, display_id)
+ entries = self._parse_page_data(page['data'])
+ for page_num in range(2, page['paging']['pages'] + 1):
+ entries.extend(self._parse_page_data(
+ self._download_json(url + '&page=%d' % page_num, display_id)['data']))
+ return entries
+
+
+class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)'
+
+ _TEST = {
+ 'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost',
+ 'info_dict': {
+ 'id': '36',
+ 'title': 'The Complete Picture with Julieanne Kost',
+ 'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27',
+ },
+ 'playlist_mincount': 136,
+ }
+
+ def _get_element_url(self, element_data):
+ return element_data['urls'][0]
+
+ def _real_extract(self, url):
+ language, show_urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
+ query = 'language=%s&show_urlname=%s' % (language, show_urlname)
+
+ show_data = self._download_json(self._API_BASE_URL + 'show/get/?%s' % query, show_urlname)['data'][0]
+
+ return self.playlist_result(
+ self._extract_playlist_entries(self._API_BASE_URL + 'episode/?%s' % query, show_urlname),
+ compat_str(show_data['id']),
+ show_data['show_name'],
+ show_data['show_description'])
+
+
+class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?'
+
+ _TEST = {
+ 'url': 'http://tv.adobe.com/channel/development',
+ 'info_dict': {
+ 'id': 'development',
+ },
+ 'playlist_mincount': 96,
+ }
+
+ def _get_element_url(self, element_data):
+ return element_data['url']
+
+ def _real_extract(self, url):
+ language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
+ query = 'language=%s&channel_urlname=%s' % (language, channel_urlname)
+ if category_urlname:
+ query += '&category_urlname=%s' % category_urlname
+
+ return self.playlist_result(
+ self._extract_playlist_entries(self._API_BASE_URL + 'show/?%s' % query, channel_urlname),
+ channel_urlname)
+
+
class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
-
- webpage = self._download_webpage(url, video_id)
-
- player_params = self._parse_json(self._search_regex(
- r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
- video_id)
+ video_data = self._download_json(url + '?format=json', video_id)
formats = [{
+ 'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')),
'url': source['src'],
- 'width': source.get('width'),
- 'height': source.get('height'),
- 'tbr': source.get('bitrate'),
- } for source in player_params['sources']]
+ 'width': int_or_none(source.get('width')),
+ 'height': int_or_none(source.get('height')),
+ 'tbr': int_or_none(source.get('bitrate')),
+ } for source in video_data['sources']]
+ self._sort_formats(formats)
# For both metadata and downloaded files the duration varies among
# formats, so pick the maximum one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
- for source in player_params['sources']]))
+ for source in video_data['sources']]))
subtitles = {}
- for translation in player_params.get('translations', []):
+ for translation in video_data.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
return {
'id': video_id,
'formats': formats,
- 'title': player_params['title'],
- 'description': self._og_search_description(webpage),
+ 'title': video_data['title'],
+ 'description': video_data.get('description'),
+ 'thumbnail': video_data['video'].get('poster'),
'duration': duration,
'subtitles': subtitles,
}
'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
'info_dict': {
'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
}]
@staticmethod
media_url = file_el.text
if determine_ext(media_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
- media_url, segment_title, 'mp4', preference=0, m3u8_id='hls'))
+ media_url, segment_title, 'mp4', preference=0,
+ m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': '%s_%s' % (bitrate, ftype),
--- /dev/null
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import smuggle_url
+
+
+class AENetworksIE(InfoExtractor):
+ IE_NAME = 'aenetworks'
+ IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network'
+ _VALID_URL = r'https?://(?:www\.)?(?:(?:history|aetv|mylifetime)\.com|fyi\.tv)/(?:[^/]+/)+(?P<id>[^/]+?)(?:$|[?#])'
+
+ _TESTS = [{
+ 'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
+ 'info_dict': {
+ 'id': 'g12m5Gyt3fdR',
+ 'ext': 'mp4',
+ 'title': "Bet You Didn't Know: Valentine's Day",
+ 'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ 'add_ie': ['ThePlatform'],
+ 'expected_warnings': ['JSON-LD'],
+ }, {
+ 'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
+ 'info_dict': {
+ 'id': 'eg47EERs_JsZ',
+ 'ext': 'mp4',
+ 'title': 'Winter Is Coming',
+ 'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ 'add_ie': ['ThePlatform'],
+ }, {
+ 'url': 'http://www.aetv.com/shows/duck-dynasty/video/inlawful-entry',
+ 'only_matching': True
+ }, {
+ 'url': 'http://www.fyi.tv/shows/tiny-house-nation/videos/207-sq-ft-minnesota-prairie-cottage',
+ 'only_matching': True
+ }, {
+ 'url': 'http://www.mylifetime.com/shows/project-runway-junior/video/season-1/episode-6/superstar-clients',
+ 'only_matching': True
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
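+ # The video's release URL is exposed either in a data-release-url attribute or a media_url JS variable.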
+ video_url_re = [
+ r'data-href="[^"]*/%s"[^>]+data-release-url="([^"]+)"' % video_id,
+ r"media_url\s*=\s*'([^']+)'"
+ ]
+ video_url = self._search_regex(video_url_re, webpage, 'video url')
+
+ info = self._search_json_ld(webpage, video_id, fatal=False)
+ info.update({
+ '_type': 'url_transparent',
+ 'url': smuggle_url(video_url, {'sig': {'key': 'crazyjava', 'secret': 's3cr3t'}}),
+ })
+ return info
+++ /dev/null
-# coding: utf-8
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-
-
-class AftenpostenIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/(?:#!/)?video/(?P<id>\d+)'
- _TEST = {
- 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
- 'md5': 'fd828cd29774a729bf4d4425fe192972',
- 'info_dict': {
- 'id': '21039',
- 'ext': 'mov',
- 'title': 'TRAILER: "Sweatshop" - I can´t take any more',
- 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
- 'timestamp': 1416927969,
- 'upload_date': '20141125',
- }
- }
-
- def _real_extract(self, url):
- return self.url_result('xstream:ap:%s' % self._match_id(url), 'Xstream')
from ..compat import compat_str
from ..utils import (
qualities,
+ unescapeHTML,
+ xpath_element,
)
'id': '19540403',
'ext': 'mp4',
'title': 'Planes 2 Bande-annonce VF',
- 'description': 'md5:eeaffe7c2d634525e21159b93acf3b1e',
+ 'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). Planes 2, un film de Roberts Gannaway',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'id': '19544709',
'ext': 'mp4',
'title': 'Dragons 2 - Bande annonce finale VF',
- 'description': 'md5:71742e3a74b0d692c7fce0dd2017a4ac',
+ 'description': 'md5:601d15393ac40f249648ef000720e7e3',
'thumbnail': 're:http://.*\.jpg',
},
}, {
if typ == 'film':
video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
else:
- player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player')
-
- player_data = json.loads(player)
- video_id = compat_str(player_data['refMedia'])
+ player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player', default=None)
+ if player:
+ player_data = json.loads(player)
+ video_id = compat_str(player_data['refMedia'])
+ else:
+ model = self._search_regex(r'data-model="([^"]+)">', webpage, 'data model')
+ model_data = self._parse_json(unescapeHTML(model), display_id)
+ video_id = compat_str(model_data['id'])
xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)
- video = xml.find('.//AcVisionVideo').attrib
+ video = xpath_element(xml, './/AcVisionVideo').attrib
quality = qualities(['ld', 'md', 'hd'])
formats = []
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class AMPIE(InfoExtractor):
+ # parse Akamai Adaptive Media Player feed
+ def _extract_feed_info(self, url):
+ item = self._download_json(
+ url, None, 'Downloading Akamai AMP feed',
+ 'Unable to download Akamai AMP feed')['channel']['item']
+
+ video_id = item['guid']
+
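+ # Media RSS nodes may sit under media-group or directly on the item; try 'media-<name>' first, then the plain name.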
+ def get_media_node(name, default=None):
+ media_name = 'media-%s' % name
+ media_group = item.get('media-group') or item
+ return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
+
+ thumbnails = []
+ media_thumbnail = get_media_node('thumbnail')
+ if media_thumbnail:
+ if isinstance(media_thumbnail, dict):
+ media_thumbnail = [media_thumbnail]
+ for thumbnail_data in media_thumbnail:
+ thumbnail = thumbnail_data['@attributes']
+ thumbnails.append({
+ 'url': self._proto_relative_url(thumbnail['url'], 'http:'),
+ 'width': int_or_none(thumbnail.get('width')),
+ 'height': int_or_none(thumbnail.get('height')),
+ })
+
+ subtitles = {}
+ media_subtitle = get_media_node('subTitle')
+ if media_subtitle:
+ if isinstance(media_subtitle, dict):
+ media_subtitle = [media_subtitle]
+ for subtitle_data in media_subtitle:
+ subtitle = subtitle_data['@attributes']
+ lang = subtitle.get('lang') or 'en'
+ subtitles[lang] = [{'url': subtitle['href']}]
+
+ formats = []
+ media_content = get_media_node('content')
+ if isinstance(media_content, dict):
+ media_content = [media_content]
+ for media_data in media_content:
+ media = media_data['@attributes']
+ media_type = media['type']
+ if media_type == 'video/f4m':
+ formats.extend(self._extract_f4m_formats(
+ media['url'] + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
+ video_id, f4m_id='hds', fatal=False))
+ elif media_type == 'application/x-mpegURL':
+ formats.extend(self._extract_m3u8_formats(
+ media['url'], video_id, 'mp4', m3u8_id='hls', fatal=False))
+ else:
+ formats.append({
+ 'format_id': media_data['media-category']['@attributes']['label'],
+ 'url': media['url'],
+ 'tbr': int_or_none(media.get('bitrate')),
+ 'filesize': int_or_none(media.get('fileSize')),
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': get_media_node('title'),
+ 'description': get_media_node('description'),
+ 'thumbnails': thumbnails,
+ 'timestamp': parse_iso8601(item.get('pubDate'), ' '),
+ 'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
+ 'subtitles': subtitles,
+ 'formats': formats,
+ }
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ determine_ext,
+ encode_dict,
+ ExtractorError,
+ sanitized_Request,
+ urlencode_postdata,
+)
+
+
+class AnimeOnDemandIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
+ _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
+ _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
+ _NETRC_MACHINE = 'animeondemand'
+ _TEST = {
+ 'url': 'https://www.anime-on-demand.de/anime/161',
+ 'info_dict': {
+ 'id': '161',
+ 'title': 'Grimgar, Ashes and Illusions (OmU)',
+ 'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
+ },
+ 'playlist_mincount': 4,
+ }
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+
+ login_page = self._download_webpage(
+ self._LOGIN_URL, None, 'Downloading login page')
+
+ login_form = self._form_hidden_inputs('new_user', login_page)
+
+ login_form.update({
+ 'user[login]': username,
+ 'user[password]': password,
+ })
+
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
+ 'post url', default=self._LOGIN_URL, group='url')
+
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
+
+ request = sanitized_Request(
+ post_url, urlencode_postdata(encode_dict(login_form)))
+ request.add_header('Referer', self._LOGIN_URL)
+
+ response = self._download_webpage(
+ request, None, 'Logging in as %s' % username)
+
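+ # A logout link only appears when authenticated; otherwise report the site's error message if one is shown.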
+ if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
+ error = self._search_regex(
+ r'<p class="alert alert-danger">(.+?)</p>',
+ response, 'error', default=None)
+ if error:
+ raise ExtractorError('Unable to login: %s' % error, expected=True)
+ raise ExtractorError('Unable to log in')
+
+ def _real_initialize(self):
+ self._login()
+
+ def _real_extract(self, url):
+ anime_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, anime_id)
+
+ if 'data-playlist=' not in webpage:
+ self._download_webpage(
+ self._APPLY_HTML5_URL, anime_id,
+ 'Activating HTML5 beta', 'Unable to apply HTML5 beta')
+ webpage = self._download_webpage(url, anime_id)
+
+ csrf_token = self._html_search_meta(
+ 'csrf-token', webpage, 'csrf token', fatal=True)
+
+ anime_title = self._html_search_regex(
+ r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
+ webpage, 'anime name')
+ anime_description = self._html_search_regex(
+ r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
+ webpage, 'anime description', default=None)
+
+ entries = []
+
+ for episode_html in re.findall(r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage):
+ m = re.search(
+ r'class="episodebox-title"[^>]+title="Episode (?P<number>\d+) - (?P<title>.+?)"', episode_html)
+ if not m:
+ continue
+
+ episode_number = int(m.group('number'))
+ episode_title = m.group('title')
+ video_id = 'episode-%d' % episode_number
+
+ common_info = {
+ 'id': video_id,
+ 'series': anime_title,
+ 'episode': episode_title,
+ 'episode_number': episode_number,
+ }
+
+ formats = []
+
+ playlist_url = self._search_regex(
+ r'data-playlist=(["\'])(?P<url>.+?)\1',
+ episode_html, 'data playlist', default=None, group='url')
+ if playlist_url:
+ request = sanitized_Request(
+ compat_urlparse.urljoin(url, playlist_url),
+ headers={
+ 'X-Requested-With': 'XMLHttpRequest',
+ 'X-CSRF-Token': csrf_token,
+ 'Referer': url,
+ 'Accept': 'application/json, text/javascript, */*; q=0.01',
+ })
+
+ playlist = self._download_json(
+ request, video_id, 'Downloading playlist JSON', fatal=False)
+ if playlist:
+ playlist = playlist['playlist'][0]
+ title = playlist['title']
+ description = playlist.get('description')
+ for source in playlist.get('sources', []):
+ file_ = source.get('file')
+ if file_ and determine_ext(file_) == 'm3u8':
+ formats = self._extract_m3u8_formats(
+ file_, video_id, 'mp4',
+ entry_protocol='m3u8_native', m3u8_id='hls')
+
+ if formats:
+ f = common_info.copy()
+ f.update({
+ 'title': title,
+ 'description': description,
+ 'formats': formats,
+ })
+ entries.append(f)
+
+ m = re.search(
+ r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
+ episode_html)
+ if m:
+ f = common_info.copy()
+ f.update({
+ 'id': '%s-teaser' % f['id'],
+ 'title': m.group('title'),
+ 'url': compat_urlparse.urljoin(url, m.group('href')),
+ })
+ entries.append(f)
+
+ return self.playlist_result(entries, anime_id, anime_title, anime_description)
from __future__ import unicode_literals
-import re
+from .nuevo import NuevoBaseIE
-from .common import InfoExtractor
-
-class AnitubeIE(InfoExtractor):
+class AnitubeIE(NuevoBaseIE):
IE_NAME = 'anitube.se'
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
key = self._search_regex(
r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key')
- config_xml = self._download_xml(
- 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)
-
- video_title = config_xml.find('title').text
- thumbnail = config_xml.find('image').text
- duration = float(config_xml.find('duration').text)
-
- formats = []
- video_url = config_xml.find('file')
- if video_url is not None:
- formats.append({
- 'format_id': 'sd',
- 'url': video_url.text,
- })
- video_url = config_xml.find('filehd')
- if video_url is not None:
- formats.append({
- 'format_id': 'hd',
- 'url': video_url.text,
- })
-
- return {
- 'id': video_id,
- 'title': video_title,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'formats': formats
- }
+ return self._extract_nuevo(
+ 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, video_id)
class AppleTrailersIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
+ IE_NAME = 'appletrailers'
+ _VALID_URL = r'https?://(?:www\.|movie)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
_TESTS = [{
'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
'info_dict': {
},
},
]
+ }, {
+ 'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/',
+ 'info_dict': {
+ 'id': 'blackthorn',
+ },
+ 'playlist_mincount': 2,
}, {
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
'only_matching': True,
+ }, {
+ 'url': 'http://movietrailers.apple.com/trailers/focus_features/kuboandthetwostrings/',
+ 'only_matching': True,
}]
_JSON_RE = r'iTunes.playURL\((.*?)\);'
def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
- s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
+ s = re.sub(r'<img ([^<]*?)/?>', r'<img \1/>', s)
# The ' in the onClick attributes are not escaped, so pages such as
# http://trailers.apple.com/trailers/wb/gravity/ couldn't be parsed
trailer_info_json = self._search_regex(self._JSON_RE,
on_click, 'trailer info')
trailer_info = json.loads(trailer_info_json)
+ first_url = trailer_info.get('url')
+ if not first_url:
+ continue
title = trailer_info['title']
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
thumbnail = li.find('.//img').attrib['src']
if m:
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
- first_url = trailer_info['url']
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
'id': movie,
'entries': playlist,
}
+
+
+class AppleTrailersSectionIE(InfoExtractor):
+ IE_NAME = 'appletrailers:section'
+ _SECTIONS = {
+ 'justadded': {
+ 'feed_path': 'just_added',
+ 'title': 'Just Added',
+ },
+ 'exclusive': {
+ 'feed_path': 'exclusive',
+ 'title': 'Exclusive',
+ },
+ 'justhd': {
+ 'feed_path': 'just_hd',
+ 'title': 'Just HD',
+ },
+ 'mostpopular': {
+ 'feed_path': 'most_pop',
+ 'title': 'Most Popular',
+ },
+ 'moviestudios': {
+ 'feed_path': 'studios',
+ 'title': 'Movie Studios',
+ },
+ }
+ _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>%s)' % '|'.join(_SECTIONS)
+ _TESTS = [{
+ 'url': 'http://trailers.apple.com/#section=justadded',
+ 'info_dict': {
+ 'title': 'Just Added',
+ 'id': 'justadded',
+ },
+ 'playlist_mincount': 80,
+ }, {
+ 'url': 'http://trailers.apple.com/#section=exclusive',
+ 'info_dict': {
+ 'title': 'Exclusive',
+ 'id': 'exclusive',
+ },
+ 'playlist_mincount': 80,
+ }, {
+ 'url': 'http://trailers.apple.com/#section=justhd',
+ 'info_dict': {
+ 'title': 'Just HD',
+ 'id': 'justhd',
+ },
+ 'playlist_mincount': 80,
+ }, {
+ 'url': 'http://trailers.apple.com/#section=mostpopular',
+ 'info_dict': {
+ 'title': 'Most Popular',
+ 'id': 'mostpopular',
+ },
+ 'playlist_mincount': 80,
+ }, {
+ 'url': 'http://trailers.apple.com/#section=moviestudios',
+ 'info_dict': {
+ 'title': 'Movie Studios',
+ 'id': 'moviestudios',
+ },
+ 'playlist_mincount': 80,
+ }]
+
+ def _real_extract(self, url):
+ section = self._match_id(url)
+ section_data = self._download_json(
+ 'http://trailers.apple.com/trailers/home/feeds/%s.json' % self._SECTIONS[section]['feed_path'],
+ section)
+ entries = [
+ self.url_result('http://trailers.apple.com' + e['location'])
+ for e in section_data]
+ return self.playlist_result(entries, section, self._SECTIONS[section]['title'])
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
+ if quality != 'auto' and ext in ('f4m', 'm3u8'):
+ continue
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
- video_id, preference=-1, f4m_id='hds'))
+ video_id, preference=-1, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
- stream_url, video_id, 'mp4', preference=1, m3u8_id='hls'))
+ stream_url, video_id, 'mp4', preference=1, m3u8_id='hls', fatal=False))
else:
if server and server.startswith('rtmp'):
f = {
unified_strdate,
get_element_by_attribute,
int_or_none,
+ NO_DEFAULT,
qualities,
)
class ArteTvIE(InfoExtractor):
- _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
+ _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de|en|es)/.*-(?P<id>.*?)\.html'
IE_NAME = 'arte.tv'
def _real_extract(self, url):
class ArteTVPlus7IE(InfoExtractor):
IE_NAME = 'arte.tv:+7'
- _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
+ _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/(?:(?:sendungen|emissions|embed)/)?(?P<id>[^/]+)/(?P<name>[^/?#&+])'
@classmethod
def _extract_url_info(cls, url):
mobj = re.match(cls._VALID_URL, url)
lang = mobj.group('lang')
- # This is not a real id, it can be for example AJT for the news
- # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
- video_id = mobj.group('id')
+ query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+ if 'vid' in query:
+ video_id = query['vid'][0]
+ else:
+ # This is not a real id; it can be, for example, AJT for the news
+ # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
+ video_id = mobj.group('id')
return video_id, lang
def _real_extract(self, url):
return self._extract_from_webpage(webpage, video_id, lang)
def _extract_from_webpage(self, webpage, video_id, lang):
+ patterns_templates = (r'arte_vp_url=["\'](.*?%s.*?)["\']', r'data-url=["\']([^"]+%s[^"]+)["\']')
+ ids = (video_id, '')
+ # Some pages contain multiple videos (e.g.
+ # http://www.arte.tv/guide/de/sendungen/XEN/xenius/?vid=055918-015_PLUS7-D),
+ # so first look for JSON URLs that contain the video id from the
+ # 'vid' parameter.
+ patterns = [t % re.escape(_id) for _id in ids for t in patterns_templates]
json_url = self._html_search_regex(
- [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
- webpage, 'json vp url', default=None)
+ patterns, webpage, 'json vp url', default=None)
if not json_url:
- iframe_url = self._html_search_regex(
- r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
- webpage, 'iframe url', group='url')
- json_url = compat_parse_qs(
- compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
- return self._extract_from_json_url(json_url, video_id, lang)
+ def find_iframe_url(webpage, default=NO_DEFAULT):
+ return self._html_search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
+ webpage, 'iframe url', group='url', default=default)
+
+ iframe_url = find_iframe_url(webpage, None)
+ if not iframe_url:
+ embed_url = self._html_search_regex(
+ r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url', default=None)
+ if embed_url:
+ player = self._download_json(
+ embed_url, video_id, 'Downloading player page')
+ iframe_url = find_iframe_url(player['html'])
+ # en and es URLs produce react-based pages with different layout (e.g.
+ # http://www.arte.tv/guide/en/053330-002-A/carnival-italy?zone=world)
+ if not iframe_url:
+ program = self._search_regex(
+ r'program\s*:\s*({.+?["\']embed_html["\'].+?}),?\s*\n',
+ webpage, 'program', default=None)
+ if program:
+ embed_html = self._parse_json(program, video_id)
+ if embed_html:
+ iframe_url = find_iframe_url(embed_html['embed_html'])
+ if iframe_url:
+ json_url = compat_parse_qs(
+ compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
+ if json_url:
+ return self._extract_from_json_url(json_url, video_id, lang)
+ # Different kind of embed URL (e.g.
+ # http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium)
+ embed_url = self._search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1',
+ webpage, 'embed url', group='url')
+ return self.url_result(embed_url)
def _extract_from_json_url(self, json_url, video_id, lang):
info = self._download_json(json_url, video_id)
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
- upload_date_str = player_info.get('VDA', '').split(' ')[0]
+ upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
title = player_info['VTI'].strip()
subtitle = player_info.get('VSU', '').strip()
}
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
+ LANGS = {
+ 'fr': 'F',
+ 'de': 'A',
+ 'en': 'E[ANG]',
+ 'es': 'E[ESP]',
+ }
+
formats = []
for format_id, format_dict in player_info['VSR'].items():
f = dict(format_dict)
versionCode = f.get('versionCode')
-
- langcode = {
- 'fr': 'F',
- 'de': 'A',
- }.get(lang, lang)
- lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
- lang_pref = (
- None if versionCode is None else (
- 10 if any(re.match(r, versionCode) for r in lang_rexs)
- else -10))
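+ # Rank versions by how many of the language regexes the versionCode matches; non-matching versions are demoted.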
+ langcode = LANGS.get(lang, lang)
+ lang_rexs = [r'VO?%s-' % re.escape(langcode), r'VO?.-ST%s$' % re.escape(langcode)]
+ lang_pref = None
+ if versionCode:
+ matched_lang_rexs = [r for r in lang_rexs if re.match(r, versionCode)]
+ lang_pref = -10 if not matched_lang_rexs else 10 * len(matched_lang_rexs)
source_pref = 0
if versionCode is not None:
# The original version with subtitles has lower relevance
- if re.match(r'VO-ST(F|A)', versionCode):
+ if re.match(r'VO-ST(F|A|E)', versionCode):
source_pref -= 10
# The version with sourds/mal subtitles has also lower relevance
- elif re.match(r'VO?(F|A)-STM\1', versionCode):
+ elif re.match(r'VO?(F|A|E)-STM\1', versionCode):
source_pref -= 9
format = {
'format_id': format_id,
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:creative'
- _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
+ _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de|en|es)/(?:magazine?/)?(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
class ArteTVFutureIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:future'
- _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
+ _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
- _TEST = {
- 'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
+ _TESTS = [{
+ 'url': 'http://future.arte.tv/fr/info-sciences/les-ecrevisses-aussi-sont-anxieuses',
'info_dict': {
- 'id': '5201',
+ 'id': '050940-028-A',
'ext': 'mp4',
- 'title': 'Les champignons au secours de la planète',
- 'upload_date': '20131101',
+ 'title': 'Les écrevisses aussi peuvent être anxieuses',
+ 'upload_date': '20140902',
},
- }
-
- def _real_extract(self, url):
- anchor_id, lang = self._extract_url_info(url)
- webpage = self._download_webpage(url, anchor_id)
- row = self._search_regex(
- r'(?s)id="%s"[^>]*>.+?(<div[^>]*arte_vp_url[^>]*>)' % anchor_id,
- webpage, 'row')
- return self._extract_from_webpage(row, anchor_id, lang)
+ }, {
+ 'url': 'http://future.arte.tv/fr/la-science-est-elle-responsable',
+ 'only_matching': True,
+ }]
class ArteTVDDCIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:ddc'
- _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
+ _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>[^/?#&]+)'
def _real_extract(self, url):
video_id, lang = self._extract_url_info(url)
class ArteTVConcertIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:concert'
- _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
+ _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
}
+class ArteTVCinemaIE(ArteTVPlus7IE):
+ IE_NAME = 'arte.tv:cinema'
+ _VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>.+)'
+
+ _TEST = {
+ 'url': 'http://cinema.arte.tv/de/node/38291',
+ 'md5': '6b275511a5107c60bacbeeda368c3aa1',
+ 'info_dict': {
+ 'id': '055876-000_PWA12025-D',
+ 'ext': 'mp4',
+ 'title': 'Tod auf dem Nil',
+ 'upload_date': '20160122',
+ 'description': 'md5:7f749bbb77d800ef2be11d54529b96bc',
+ },
+ }
+
+
+class ArteTVMagazineIE(ArteTVPlus7IE):
+ IE_NAME = 'arte.tv:magazine'
+ _VALID_URL = r'https?://(?:www\.)?arte\.tv/magazine/[^/]+/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
+
+ _TESTS = [{
+ # Embedded via <iframe src="http://www.arte.tv/arte_vp/index.php?json_url=..."
+ 'url': 'http://www.arte.tv/magazine/trepalium/fr/entretien-avec-le-realisateur-vincent-lannoo-trepalium',
+ 'md5': '2a9369bcccf847d1c741e51416299f25',
+ 'info_dict': {
+ 'id': '065965-000-A',
+ 'ext': 'mp4',
+ 'title': 'Trepalium - Extrait Ep.01',
+ 'upload_date': '20160121',
+ },
+ }, {
+ # Embedded via <iframe src="http://www.arte.tv/guide/fr/embed/054813-004-A/medium"
+ 'url': 'http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium',
+ 'md5': 'fedc64fc7a946110fe311634e79782ca',
+ 'info_dict': {
+ 'id': '054813-004_PLUS7-F',
+ 'ext': 'mp4',
+ 'title': 'Trepalium (4/6)',
+ 'description': 'md5:10057003c34d54e95350be4f9b05cb40',
+ 'upload_date': '20160218',
+ },
+ }, {
+ 'url': 'http://www.arte.tv/magazine/metropolis/de/frank-woeste-german-paris-metropolis',
+ 'only_matching': True,
+ }]
+
+
class ArteTVEmbedIE(ArteTVPlus7IE):
IE_NAME = 'arte.tv:embed'
_VALID_URL = r'''(?x)
import time
import hmac
+import hashlib
+import re
from .common import InfoExtractor
from ..compat import (
'duration': 5527.6,
'thumbnail': 're:^https?://.*\.jpg$',
},
+ 'skip': 'This video is only available for registered users'
+ },
+ {
+ 'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
+ 'md5': '0d0e918533bbd4b263f2de4d197d4aac',
+ 'info_dict': {
+ 'id': 'capitulo-112-david-bustamante',
+ 'ext': 'flv',
+ 'title': 'David Bustamante',
+ 'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
+ 'duration': 1439.0,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
},
{
'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
_LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
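+ # Map the player's typeOfEpisode values to user-facing error messages.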
+ _ERRORS = {
+ 'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
+ 'DELETED': 'This video has expired and is no longer available for online streaming.',
+ 'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to rights restrictions.',
+ # 'PREMIUM': 'PREMIUM',
+ }
+
def _real_initialize(self):
self._login()
episode_id = self._search_regex(
r'episode="([^"]+)"', webpage, 'episode id')
+ request = sanitized_Request(
+ self._PLAYER_URL_TEMPLATE % episode_id,
+ headers={'User-Agent': self._USER_AGENT})
+ player = self._download_json(request, episode_id, 'Downloading player JSON')
+
+ episode_type = player.get('typeOfEpisode')
+ error_message = self._ERRORS.get(episode_type)
+ if error_message:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
+
+ formats = []
+ video_url = player.get('urlVideo')
+ if video_url:
+ format_info = {
+ 'url': video_url,
+ 'format_id': 'http',
+ }
+ mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
+ if mobj:
+ format_info.update({
+ 'width': int_or_none(mobj.group('width')),
+ 'height': int_or_none(mobj.group('height')),
+ 'tbr': int_or_none(mobj.group('bitrate')),
+ })
+ formats.append(format_info)
+
timestamp = int_or_none(self._download_webpage(
self._TIME_API_URL,
video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
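+ # The download token is an HMAC-MD5 digest of the episode id plus the shifted timestamp, keyed with _MAGIC.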
token = hmac.new(
self._MAGIC.encode('ascii'),
- (episode_id + timestamp_shifted).encode('utf-8')
+ (episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
).hexdigest()
- formats = []
- for fmt in ['windows', 'android_tablet']:
- request = sanitized_Request(
- self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token))
- request.add_header('User-Agent', self._USER_AGENT)
-
- fmt_json = self._download_json(
- request, video_id, 'Downloading %s video JSON' % fmt)
-
- result = fmt_json.get('resultDes')
- if result.lower() != 'ok':
- raise ExtractorError(
- '%s returned error: %s' % (self.IE_NAME, result), expected=True)
-
- for format_id, video_url in fmt_json['resultObject'].items():
- if format_id == 'token' or not video_url.startswith('http'):
- continue
- if video_url.endswith('/Manifest'):
- if 'geodeswowsmpra3player' in video_url:
- f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
- f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
- # this videos are protected by DRM, the f4m downloader doesn't support them
- continue
- else:
- f4m_url = video_url[:-9] + '/manifest.f4m'
- formats.extend(self._extract_f4m_formats(f4m_url, video_id))
- else:
- formats.append({
- 'url': video_url,
- 'format_id': 'android-%s' % format_id,
- 'preference': 1,
- })
- self._sort_formats(formats)
+ request = sanitized_Request(
+ self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
+ headers={'User-Agent': self._USER_AGENT})
- player = self._download_json(
- self._PLAYER_URL_TEMPLATE % episode_id,
- episode_id)
+ fmt_json = self._download_json(
+ request, video_id, 'Downloading windows video JSON')
+
+ result = fmt_json.get('resultDes')
+ if result.lower() != 'ok':
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, result), expected=True)
+
+ for format_id, video_url in fmt_json['resultObject'].items():
+ if format_id == 'token' or not video_url.startswith('http'):
+ continue
+ if 'geodeswowsmpra3player' in video_url:
+ f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
+ f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
+ # these videos are protected by DRM; the f4m downloader doesn't support them
+ continue
+ else:
+ f4m_url = video_url[:-9] + '/manifest.f4m'
+ formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
+ self._sort_formats(formats)
path_data = player.get('pathData')
episode = self._download_xml(
- self._EPISODE_URL_TEMPLATE % path_data,
- video_id, 'Downloading episode XML')
+ self._EPISODE_URL_TEMPLATE % path_data, video_id,
+ 'Downloading episode XML')
duration = float_or_none(xpath_text(
episode, './media/asset/info/technical/contentDuration', 'duration'))
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
+)
+
+
+class AudiMediaIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)'
+ _TEST = {
+ 'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test',
+ 'md5': '79a8b71c46d49042609795ab59779b66',
+ 'info_dict': {
+ 'id': '1565',
+ 'ext': 'mp4',
+ 'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
+ 'description': 'md5:60e5d30a78ced725f7b8d34370762941',
+ 'upload_date': '20151124',
+ 'timestamp': 1448354940,
+ 'duration': 74022,
+ 'view_count': int,
+ }
+ }
+ # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
+ _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload')
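+ # The embed element's id packs four dash-separated fields: prefix, stage mode, video id and language.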
+ _, stage_mode, video_id, lang = raw_payload.split('-')
+
+ # TODO: handle s and e stage_mode (live streams and ended live streams)
+ if stage_mode not in ('s', 'e'):
+ request = sanitized_Request(
+ 'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
+ headers={'X-Auth-Token': self._AUTH_TOKEN})
+ json_data = self._download_json(request, video_id)['results']
+ formats = []
+
+ stream_url_hls = json_data.get('stream_url_hls')
+ if stream_url_hls:
+ formats.extend(self._extract_m3u8_formats(
+ stream_url_hls, video_id, 'mp4',
+ entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
+
+ stream_url_hds = json_data.get('stream_url_hds')
+ if stream_url_hds:
+ formats.extend(self._extract_f4m_formats(
+ stream_url_hds + '?hdcore=3.4.0',
+ video_id, f4m_id='hds', fatal=False))
+
+ for video_version in json_data.get('video_versions', []):
+ video_version_url = video_version.get('download_url') or video_version.get('stream_url')
+ if not video_version_url:
+ continue
+ formats.append({
+ 'url': video_version_url,
+ 'width': int_or_none(video_version.get('width')),
+ 'height': int_or_none(video_version.get('height')),
+ 'abr': int_or_none(video_version.get('audio_bitrate')),
+ 'vbr': int_or_none(video_version.get('video_bitrate')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': json_data['title'],
+ 'description': json_data.get('subtitle'),
+ 'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
+ 'timestamp': parse_iso8601(json_data.get('publication_date')),
+ 'duration': int_or_none(json_data.get('duration')),
+ 'view_count': int_or_none(json_data.get('view_count')),
+ 'formats': formats,
+ }
# API is inconsistent with errors
if 'url' not in api_response or not api_response['url'] or 'error' in api_response:
- raise ExtractorError('Invalid url %s', url)
+ raise ExtractorError('Invalid url %s' % url)
# Audiomack wraps a lot of soundcloud tracks in their branded wrapper
# if so, pass the work off to the soundcloud extractor
import json
from .common import InfoExtractor
-from ..utils import float_or_none
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ sanitized_Request,
+)
class AzubuIE(InfoExtractor):
'view_count': view_count,
'formats': formats,
}
+
+
+class AzubuLiveIE(InfoExtractor):
+ _VALID_URL = r'http://www\.azubu\.tv/(?P<id>[^/]+)$'
+
+ _TEST = {
+ 'url': 'http://www.azubu.tv/MarsTVMDLen',
+ 'only_matching': True,
+ }
+
+ def _real_extract(self, url):
+ user = self._match_id(url)
+
+ info = self._download_json(
+ 'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user),
+ user)['data']
+ if info['type'] != 'STREAM':
+ raise ExtractorError('{0} is not streaming live'.format(user), expected=True)
+
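+ # Query Brightcove's Playback API directly; the pk= value in the Accept header is the account's policy key.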
+ req = sanitized_Request(
+ 'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id'])
+ req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV')
+ bc_info = self._download_json(req, user)
+ m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
+ formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
+
+ return {
+ 'id': info['id'],
+ 'title': self._live_title(info['title']),
+ 'uploader_id': user,
+ 'formats': formats,
+ 'is_live': True,
+ 'thumbnail': bc_info['poster'],
+ }
import re
from .common import InfoExtractor
-from ..compat import compat_urlparse
+from ..utils import unescapeHTML
class BaiduVideoIE(InfoExtractor):
'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
'info_dict': {
'id': '1069',
- 'title': '中华小当家 TV版 (全52集)',
- 'description': 'md5:395a419e41215e531c857bb037bbaf80',
+ 'title': '中华小当家 TV版国语',
+ 'description': 'md5:51be07afe461cf99fa61231421b5397c',
},
'playlist_count': 52,
}, {
'title': 're:^奔跑吧兄弟',
'description': 'md5:1bf88bad6d850930f542d51547c089b8',
},
- 'playlist_mincount': 3,
+ 'playlist_mincount': 12,
}]
+ def _call_api(self, path, category, playlist_id, note):
+ return self._download_json('http://app.video.baidu.com/%s/?worktype=adnative%s&id=%s' % (
+ path, category, playlist_id), playlist_id, note)
+
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- playlist_id = mobj.group('id')
- category = category2 = mobj.group('type')
+ category, playlist_id = re.match(self._VALID_URL, url).groups()
if category == 'show':
- category2 = 'tvshow'
-
- webpage = self._download_webpage(url, playlist_id)
-
- playlist_title = self._html_search_regex(
- r'title\s*:\s*(["\'])(?P<title>[^\']+)\1', webpage,
- 'playlist title', group='title')
- playlist_description = self._html_search_regex(
- r'<input[^>]+class="j-data-intro"[^>]+value="([^"]+)"/>', webpage,
- playlist_id, 'playlist description')
+ category = 'tvshow'
+ if category == 'tv':
+ category = 'tvplay'
- site = self._html_search_regex(
- r'filterSite\s*:\s*["\']([^"]*)["\']', webpage,
- 'primary provider site')
- api_result = self._download_json(
- 'http://v.baidu.com/%s_intro/?dtype=%sPlayUrl&id=%s&site=%s' % (
- category, category2, playlist_id, site),
- playlist_id, 'Get playlist links')
+ playlist_detail = self._call_api(
+ 'xqinfo', category, playlist_id, 'Download playlist JSON metadata')
- entries = []
- for episode in api_result[0]['episodes']:
- episode_id = '%s_%s' % (playlist_id, episode['episode'])
+ playlist_title = playlist_detail['title']
+ playlist_description = unescapeHTML(playlist_detail.get('intro'))
- redirect_page = self._download_webpage(
- compat_urlparse.urljoin(url, episode['url']), episode_id,
- note='Download Baidu redirect page')
- real_url = self._html_search_regex(
- r'location\.replace\("([^"]+)"\)', redirect_page, 'real URL')
+ episodes_detail = self._call_api(
+ 'xqsingle', category, playlist_id, 'Download episodes JSON metadata')
- entries.append(self.url_result(
- real_url, video_title=episode['single_title']))
+ entries = [self.url_result(
+ episode['url'], video_title=episode['title']
+ ) for episode in episodes_detail['videos']]
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description)
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
- _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>[\da-z]{8})'
+ _ID_REGEX = r'[pb][\da-z]{7}'
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:www\.)?bbc\.co\.uk/
+ (?:
+ programmes/(?!articles/)|
+ iplayer(?:/[^/]+)?/(?:episode/|playlist/)|
+ music/clips[/#]|
+ radio/player/
+ )
+ (?P<id>%s)
+ ''' % _ID_REGEX
_MEDIASELECTOR_URLS = [
# Provides HQ HLS streams with even better quality than the pc mediaset but fails
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
- 'title': 'Kaleidoscope, Leonard Cohen',
+ 'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
- 'duration': 1740,
},
'params': {
# rtmp download
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
- 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+ 'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
'params': {
# rtmp download
'skip_download': True,
- }
+ },
+ 'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
- 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
+ 'url': 'http://www.bbc.co.uk/music/clips/p022h44b',
'note': 'Audio',
'info_dict': {
- 'id': 'p02frcch',
+ 'id': 'p022h44j',
'ext': 'flv',
- 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
- 'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
- 'duration': 3507,
+ 'title': 'BBC Proms Music Guides, Rachmaninov: Symphonic Dances',
+ 'description': "In this Proms Music Guide, Andrew McGregor looks at Rachmaninov's Symphonic Dances.",
+ 'duration': 227,
},
'params': {
# rtmp download
}, {
# iptv-all mediaset fails with geolocation, even though there is no geo
# restriction for this programme at all
- 'url': 'http://www.bbc.co.uk/programmes/b06bp7lf',
+ 'url': 'http://www.bbc.co.uk/programmes/b06rkn85',
'info_dict': {
- 'id': 'b06bp7kf',
+ 'id': 'b06rkms3',
'ext': 'flv',
- 'title': "Annie Mac's Friday Night, B.Traits sits in for Annie",
- 'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.',
- 'duration': 10800,
+ 'title': "Best of the Mini-Mixes 2015: Part 3, Annie Mac's Friday Night - BBC Radio 1",
+ 'description': "Annie has part three in the Best of the Mini-Mixes 2015, plus the year's Most Played!",
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # compact player (https://github.com/rg3/youtube-dl/issues/8147)
+ 'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
+ 'info_dict': {
+ 'id': 'p028bfkj',
+ 'ext': 'flv',
+ 'title': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
+ 'description': 'Extract from BBC documentary Look Stranger - Giant Leeks and Magic Brews',
},
'params': {
# rtmp download
}, {
'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
'only_matching': True,
+ }, {
+ 'url': 'http://www.bbc.co.uk/radio/player/p03cchwf',
+ 'only_matching': True,
}
]
elif transfer_format == 'dash':
pass
elif transfer_format == 'hls':
- m3u8_formats = self._extract_m3u8_formats(
+ formats.extend(self._extract_m3u8_formats(
href, programme_id, ext='mp4', entry_protocol='m3u8_native',
- m3u8_id=supplier, fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ m3u8_id=supplier, fatal=False))
# Direct link
else:
formats.append({
webpage = self._download_webpage(url, group_id, 'Downloading video page')
programme_id = None
+ duration = None
tviplayer = self._search_regex(
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
if not programme_id:
programme_id = self._search_regex(
- r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
+ r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
- title = self._og_search_title(webpage)
+ title = self._og_search_title(webpage, default=None) or self._html_search_regex(
+ (r'<h2[^>]+id="parent-title"[^>]*>(.+?)</h2>',
+ r'<div[^>]+class="info"[^>]*>\s*<h1>(.+?)</h1>'), webpage, 'title')
description = self._search_regex(
- r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
- webpage, 'description', fatal=False)
+ (r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
+ r'<div[^>]+class="info_+synopsis"[^>]*>([^<]+)</div>'),
+ webpage, 'description', default=None)
+ if not description:
+ description = self._html_search_meta('description', webpage)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
+ 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
},
'params': {
'skip_download': True,
webpage = self._download_webpage(url, playlist_id)
- timestamp = None
- playlist_title = None
- playlist_description = None
-
- ld = self._parse_json(
- self._search_regex(
- r'(?s)<script type="application/ld\+json">(.+?)</script>',
- webpage, 'ld json', default='{}'),
- playlist_id, fatal=False)
- if ld:
- timestamp = parse_iso8601(ld.get('datePublished'))
- playlist_title = ld.get('headline')
- playlist_description = ld.get('articleBody')
+ json_ld_info = self._search_json_ld(webpage, playlist_id, default=None)
+ timestamp = json_ld_info.get('timestamp')
+ playlist_title = json_ld_info.get('title')
+ playlist_description = json_ld_info.get('description')
if not timestamp:
timestamp = parse_iso8601(self._search_regex(
# article with multiple videos embedded with playlist.sxml (e.g.
# http://www.bbc.com/sport/0/football/34475836)
playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
+ playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
if playlists:
entries = [
self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
programme_id = self._search_regex(
- [r'data-video-player-vpid="([\da-z]{8})"',
- r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
+ [r'data-video-player-vpid="(%s)"' % self._ID_REGEX,
+ r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
+ r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
webpage, 'vpid', default=None)
if programme_id:
# Multiple video article (e.g.
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
- EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
+ EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
entries = []
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import (
+ compat_chr,
+ compat_ord,
+ compat_urllib_parse_unquote,
+)
from ..utils import (
int_or_none,
parse_iso8601,
video_id = self._match_id(url)
video = self._download_json(
- 'http://beeg.com/api/v1/video/%s' % video_id, video_id)
+ 'https://api.beeg.com/api/v5/video/%s' % video_id, video_id)
+
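+ # split() chops string o into chunks of length e, with any remainder kept as a shorter leading chunk.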
+ def split(o, e):
+ def cut(s, x):
+ n.append(s[:x])
+ return s[x:]
+ n = []
+ r = len(o) % e
+ if r > 0:
+ o = cut(o, r)
+ while len(o) > e:
+ o = cut(o, e)
+ n.append(o)
+ return n
+
+ def decrypt_key(key):
+ # Reverse engineered from http://static.beeg.com/cpl/1105.js
+ a = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB'
+ e = compat_urllib_parse_unquote(key)
+ o = ''.join([
+ compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
+ for n in range(len(e))])
+ return ''.join(split(o, 3)[::-1])
+
+ def decrypt_url(encrypted_url):
+ encrypted_url = self._proto_relative_url(
+ encrypted_url.replace('{DATA_MARKERS}', ''), 'https:')
+ key = self._search_regex(
+ r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
+ if not key:
+ return encrypted_url
+ return encrypted_url.replace(key, decrypt_key(key))
formats = []
for format_id, video_url in video.items():
if not height:
continue
formats.append({
- 'url': self._proto_relative_url(video_url.replace('{DATA_MARKERS}', ''), 'http:'),
+ 'url': decrypt_url(video_url),
'format_id': format_id,
'height': int(height),
})
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import base64
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote
+
+
+class BigflixIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?bigflix\.com/.+/(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://www.bigflix.com/Hindi-movies/Action-movies/Singham-Returns/16537',
+ 'md5': 'ec76aa9b1129e2e5b301a474e54fab74',
+ 'info_dict': {
+ 'id': '16537',
+ 'ext': 'mp4',
+ 'title': 'Singham Returns',
+ 'description': 'md5:3d2ba5815f14911d5cc6a501ae0cf65d',
+ }
+ }, {
+ # 2 formats
+ 'url': 'http://www.bigflix.com/Tamil-movies/Drama-movies/Madarasapatinam/16070',
+ 'info_dict': {
+ 'id': '16070',
+ 'ext': 'mp4',
+ 'title': 'Madarasapatinam',
+ 'description': 'md5:63b9b8ed79189c6f0418c26d9a3452ca',
+ 'formats': 'mincount:2',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # multiple formats
+ 'url': 'http://www.bigflix.com/Malayalam-movies/Drama-movies/Indian-Rupee/15967',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(
+ r'<div[^>]+class=["\']pagetitle["\'][^>]*>(.+?)</div>',
+ webpage, 'title')
+
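+ # Format URLs are URL-quoted base64; unquote, then base64-decode.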
+ def decode_url(quoted_b64_url):
+ return base64.b64decode(compat_urllib_parse_unquote(
+ quoted_b64_url).encode('ascii')).decode('utf-8')
+
+ formats = []
+ for height, encoded_url in re.findall(
+ r'ContentURL_(\d{3,4})[pP][^=]+=([^&]+)', webpage):
+ video_url = decode_url(encoded_url)
+ f = {
+ 'url': video_url,
+ 'format_id': '%sp' % height,
+ 'height': int(height),
+ }
+ if video_url.startswith('rtmp'):
+ f['ext'] = 'flv'
+ formats.append(f)
+
+ file_url = self._search_regex(
+ r'file=([^&]+)', webpage, 'video url', default=None)
+ if file_url:
+ video_url = decode_url(file_url)
+ if all(f['url'] != video_url for f in formats):
+ formats.append({
+ 'url': decode_url(file_url),
+ })
+
+ self._sort_formats(formats)
+
+ description = self._html_search_meta('description', webpage)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'formats': formats
+ }
from __future__ import unicode_literals
import re
-import itertools
-import json
from .common import InfoExtractor
-from ..compat import (
- compat_etree_fromstring,
-)
+from ..compat import compat_str
from ..utils import (
int_or_none,
- unified_strdate,
+ unescapeHTML,
ExtractorError,
+ xpath_text,
)
class BiliBiliIE(InfoExtractor):
- _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+ _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+)\.html)?'
_TESTS = [{
'url': 'http://www.bilibili.tv/video/av1074402/',
'md5': '2c301e4dab317596e837c3e7633e7d86',
'info_dict': {
- 'id': '1074402_part1',
+ 'id': '1554319',
'ext': 'flv',
'title': '【金坷垃】金泡沫',
- 'duration': 308,
+ 'duration': 308313,
'upload_date': '20140420',
'thumbnail': 're:^https?://.+\.jpg',
+ 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
+ 'timestamp': 1397983878,
+ 'uploader': '菊子桑',
},
}, {
'url': 'http://www.bilibili.com/video/av1041170/',
'info_dict': {
'id': '1041170',
'title': '【BD1080P】刀语【诸神&异域】',
+ 'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
+ 'uploader': '枫叶逝去',
+ 'timestamp': 1396501299,
},
'playlist_count': 9,
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- if '(此视频不存在或被删除)' in webpage:
- raise ExtractorError(
- 'The video does not exist or was deleted', expected=True)
-
- if '>你没有权限浏览! 由于版权相关问题 我们不对您所在的地区提供服务<' in webpage:
- raise ExtractorError(
- 'The video is not available in your region due to copyright reasons',
- expected=True)
-
- video_code = self._search_regex(
- r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
-
- title = self._html_search_meta(
- 'media:title', video_code, 'title', fatal=True)
- duration_str = self._html_search_meta(
- 'duration', video_code, 'duration')
- if duration_str is None:
- duration = None
- else:
- duration_mobj = re.match(
- r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
- duration_str)
- duration = (
- int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
- int(duration_mobj.group('minutes')) * 60 +
- int(duration_mobj.group('seconds')))
- upload_date = unified_strdate(self._html_search_meta(
- 'uploadDate', video_code, fatal=False))
- thumbnail = self._html_search_meta(
- 'thumbnailUrl', video_code, 'thumbnail', fatal=False)
-
- cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
-
- entries = []
-
- lq_page = self._download_webpage(
- 'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading LQ video info'
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ page_num = mobj.group('page_num') or '1'
+
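+ # The view API resolves the av id to the real stream id (cid) along with title and uploader metadata.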
+ view_data = self._download_json(
+ 'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num),
+ video_id)
+ if 'error' in view_data:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True)
+
+ cid = view_data['cid']
+ title = unescapeHTML(view_data['title'])
+
+ doc = self._download_xml(
+ 'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
+ cid,
+ 'Downloading page %s/%s' % (page_num, view_data['pages'])
)
- try:
- err_info = json.loads(lq_page)
- raise ExtractorError(
- 'BiliBili said: ' + err_info['error_text'], expected=True)
- except ValueError:
- pass
- lq_doc = compat_etree_fromstring(lq_page)
- lq_durls = lq_doc.findall('./durl')
+ if xpath_text(doc, './result') == 'error':
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True)
- hq_doc = self._download_xml(
- 'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading HQ video info',
- fatal=False,
- )
- if hq_doc is not False:
- hq_durls = hq_doc.findall('./durl')
- assert len(lq_durls) == len(hq_durls)
- else:
- hq_durls = itertools.repeat(None)
+ entries = []
- i = 1
- for lq_durl, hq_durl in zip(lq_durls, hq_durls):
+ for durl in doc.findall('./durl'):
+ size = xpath_text(durl, ['./filesize', './size'])
formats = [{
- 'format_id': 'lq',
- 'quality': 1,
- 'url': lq_durl.find('./url').text,
- 'filesize': int_or_none(
- lq_durl.find('./size'), get_attr='text'),
+ 'url': durl.find('./url').text,
+ 'filesize': int_or_none(size),
+ 'ext': 'flv',
}]
- if hq_durl is not None:
- formats.append({
- 'format_id': 'hq',
- 'quality': 2,
- 'ext': 'flv',
- 'url': hq_durl.find('./url').text,
- 'filesize': int_or_none(
- hq_durl.find('./size'), get_attr='text'),
- })
- self._sort_formats(formats)
+ backup_urls = durl.find('./backup_url')
+ if backup_urls is not None:
+ for backup_url in backup_urls.findall('./url'):
+ formats.append({'url': backup_url.text})
+ formats.reverse()
entries.append({
- 'id': '%s_part%d' % (video_id, i),
+ 'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
'title': title,
+ 'duration': int_or_none(xpath_text(durl, './length'), 1000),
'formats': formats,
- 'duration': duration,
- 'upload_date': upload_date,
- 'thumbnail': thumbnail,
})
- i += 1
-
- return {
- '_type': 'multi_video',
- 'entries': entries,
- 'id': video_id,
- 'title': title
+ info = {
+ 'id': compat_str(cid),
+ 'title': title,
+ 'description': view_data.get('description'),
+ 'thumbnail': view_data.get('pic'),
+ 'uploader': view_data.get('author'),
+ 'timestamp': int_or_none(view_data.get('created')),
+ 'view_count': int_or_none(view_data.get('play')),
+ 'duration': int_or_none(xpath_text(doc, './timelength')),
}
+
+ if len(entries) == 1:
+ entries[0].update(info)
+ return entries[0]
+ else:
+ info.update({
+ '_type': 'multi_video',
+ 'id': video_id,
+ 'entries': entries,
+ })
+ return info
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .amp import AMPIE
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class BleacherReportIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
+ 'md5': 'a3ffc3dc73afdbc2010f02d98f990f20',
+ 'info_dict': {
+ 'id': '2496438',
+ 'ext': 'mp4',
+ 'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
+ 'uploader_id': 3992341,
+ 'description': 'CFB, ACC, Florida State',
+ 'timestamp': 1434380212,
+ 'upload_date': '20150615',
+ 'uploader': 'Team Stream Now ',
+ },
+ 'add_ie': ['Ooyala'],
+ }, {
+ 'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
+ 'md5': 'af5f90dc9c7ba1c19d0a3eac806bbf50',
+ 'info_dict': {
+ 'id': '2586817',
+ 'ext': 'mp4',
+ 'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo',
+ 'timestamp': 1446839961,
+ 'uploader': 'Sean Fay',
+ 'description': 'md5:825e94e0f3521df52fa83b2ed198fa20',
+ 'uploader_id': 6466954,
+ 'upload_date': '20151011',
+ },
+ 'add_ie': ['Youtube'],
+ }]
+
+ def _real_extract(self, url):
+ article_id = self._match_id(url)
+
+ article_data = self._download_json('http://api.bleacherreport.com/api/v1/articles/%s' % article_id, article_id)['article']
+
+ thumbnails = []
+ primary_photo = article_data.get('primaryPhoto')
+ if primary_photo:
+ thumbnails = [{
+ 'url': primary_photo['url'],
+ 'width': primary_photo.get('width'),
+ 'height': primary_photo.get('height'),
+ }]
+
+ info = {
+ '_type': 'url_transparent',
+ 'id': article_id,
+ 'title': article_data['title'],
+ 'uploader': article_data.get('author', {}).get('name'),
+ 'uploader_id': article_data.get('authorId'),
+ 'timestamp': parse_iso8601(article_data.get('createdAt')),
+ 'thumbnails': thumbnails,
+ 'comment_count': int_or_none(article_data.get('commentsCount')),
+ 'view_count': int_or_none(article_data.get('hitCount')),
+ }
+
+ video = article_data.get('video')
+ if video:
+ video_type = video['type']
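+ # route known embed providers to their dedicated extractors; unknown types fall back to a best-effort type+id concatenation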
+ if video_type == 'cms.bleacherreport.com':
+ info['url'] = 'http://bleacherreport.com/video_embed?id=%s' % video['id']
+ elif video_type == 'ooyala.com':
+ info['url'] = 'ooyala:%s' % video['id']
+ elif video_type == 'youtube.com':
+ info['url'] = video['id']
+ elif video_type == 'vine.co':
+ info['url'] = 'https://vine.co/v/%s' % video['id']
+ else:
+ info['url'] = video_type + video['id']
+ return info
+ else:
+ raise ExtractorError('no video in the article', expected=True)
+
+
+class BleacherReportCMSIE(AMPIE):
+ _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36})'
+ _TESTS = [{
+ 'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
+ 'md5': '8c2c12e3af7805152675446c905d159b',
+ 'info_dict': {
+ 'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
+ 'ext': 'flv',
+ 'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
+ 'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
+ },
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ info = self._extract_feed_info('http://cms.bleacherreport.com/media/items/%s/akamai.json' % video_id)
+ info['id'] = video_id
+ return info
+++ /dev/null
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-
-from ..compat import compat_urlparse
-from ..utils import (
- clean_html,
- int_or_none,
- parse_iso8601,
- sanitized_Request,
- unescapeHTML,
- xpath_text,
- xpath_with_ns,
-)
-
-
-class BlipTVIE(InfoExtractor):
- _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'
-
- _TESTS = [
- {
- 'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
- 'md5': '80baf1ec5c3d2019037c1c707d676b9f',
- 'info_dict': {
- 'id': '5779306',
- 'ext': 'm4v',
- 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
- 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
- 'timestamp': 1323138843,
- 'upload_date': '20111206',
- 'uploader': 'cbr',
- 'uploader_id': '679425',
- 'duration': 81,
- }
- },
- {
- # https://github.com/rg3/youtube-dl/pull/2274
- 'note': 'Video with subtitles',
- 'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
- 'md5': '309f9d25b820b086ca163ffac8031806',
- 'info_dict': {
- 'id': '6586561',
- 'ext': 'mp4',
- 'title': 'Red vs. Blue Season 11 Episode 1',
- 'description': 'One-Zero-One',
- 'timestamp': 1371261608,
- 'upload_date': '20130615',
- 'uploader': 'redvsblue',
- 'uploader_id': '792887',
- 'duration': 279,
- }
- },
- {
- # https://bugzilla.redhat.com/show_bug.cgi?id=967465
- 'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI',
- 'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6',
- 'info_dict': {
- 'id': '6573122',
- 'ext': 'mov',
- 'upload_date': '20130520',
- 'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.',
- 'title': 'Red vs. Blue Season 11 Trailer',
- 'timestamp': 1369029609,
- 'uploader': 'redvsblue',
- 'uploader_id': '792887',
- }
- },
- {
- 'url': 'http://blip.tv/play/gbk766dkj4Yn',
- 'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
- 'info_dict': {
- 'id': '1749452',
- 'ext': 'mp4',
- 'upload_date': '20090208',
- 'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
- 'title': 'Nostalgia Critic: Transformers',
- 'timestamp': 1234068723,
- 'uploader': 'NostalgiaCritic',
- 'uploader_id': '246467',
- }
- },
- {
- # https://github.com/rg3/youtube-dl/pull/4404
- 'note': 'Audio only',
- 'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982',
- 'md5': '76c0a56f24e769ceaab21fbb6416a351',
- 'info_dict': {
- 'id': '7103299',
- 'ext': 'flv',
- 'title': 'Weekly Manga Recap: Kingdom',
- 'description': 'And then Shin breaks the enemy line, and he\'s all like HWAH! And then he slices a guy and it\'s all like FWASHING! And... it\'s really hard to describe the best parts of this series without breaking down into sound effects, okay?',
- 'timestamp': 1417660321,
- 'upload_date': '20141204',
- 'uploader': 'The Rollo T',
- 'uploader_id': '407429',
- 'duration': 7251,
- 'vcodec': 'none',
- }
- },
- {
- # missing duration
- 'url': 'http://blip.tv/rss/flash/6700880',
- 'info_dict': {
- 'id': '6684191',
- 'ext': 'm4v',
- 'title': 'Cowboy Bebop: Gateway Shuffle Review',
- 'description': 'md5:3acc480c0f9ae157f5fe88547ecaf3f8',
- 'timestamp': 1386639757,
- 'upload_date': '20131210',
- 'uploader': 'sfdebris',
- 'uploader_id': '706520',
- }
- }
- ]
-
- @staticmethod
- def _extract_url(webpage):
- mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
- if mobj:
- return 'http://blip.tv/a/a-' + mobj.group(1)
- mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
- if mobj:
- return mobj.group(1)
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- lookup_id = mobj.group('lookup_id')
-
- # See https://github.com/rg3/youtube-dl/issues/857 and
- # https://github.com/rg3/youtube-dl/issues/4197
- if lookup_id:
- urlh = self._request_webpage(
- 'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
- url = compat_urlparse.urlparse(urlh.geturl())
- qs = compat_urlparse.parse_qs(url.query)
- mobj = re.match(self._VALID_URL, qs['file'][0])
-
- video_id = mobj.group('id')
-
- rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
-
- def _x(p):
- return xpath_with_ns(p, {
- 'blip': 'http://blip.tv/dtd/blip/1.0',
- 'media': 'http://search.yahoo.com/mrss/',
- 'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
- })
-
- item = rss.find('channel/item')
-
- video_id = xpath_text(item, _x('blip:item_id'), 'video id') or lookup_id
- title = xpath_text(item, 'title', 'title', fatal=True)
- description = clean_html(xpath_text(item, _x('blip:puredescription'), 'description'))
- timestamp = parse_iso8601(xpath_text(item, _x('blip:datestamp'), 'timestamp'))
- uploader = xpath_text(item, _x('blip:user'), 'uploader')
- uploader_id = xpath_text(item, _x('blip:userid'), 'uploader id')
- duration = int_or_none(xpath_text(item, _x('blip:runtime'), 'duration'))
- media_thumbnail = item.find(_x('media:thumbnail'))
- thumbnail = (media_thumbnail.get('url') if media_thumbnail is not None
- else xpath_text(item, 'image', 'thumbnail'))
- categories = [category.text for category in item.findall('category') if category is not None]
-
- formats = []
- subtitles_urls = {}
-
- media_group = item.find(_x('media:group'))
- for media_content in media_group.findall(_x('media:content')):
- url = media_content.get('url')
- role = media_content.get(_x('blip:role'))
- msg = self._download_webpage(
- url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
- video_id, 'Resolving URL for %s' % role)
- real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]
-
- media_type = media_content.get('type')
- if media_type == 'text/srt' or url.endswith('.srt'):
- LANGS = {
- 'english': 'en',
- }
- lang = role.rpartition('-')[-1].strip().lower()
- langcode = LANGS.get(lang, lang)
- subtitles_urls[langcode] = url
- elif media_type.startswith('video/'):
- formats.append({
- 'url': real_url,
- 'format_id': role,
- 'format_note': media_type,
- 'vcodec': media_content.get(_x('blip:vcodec')) or 'none',
- 'acodec': media_content.get(_x('blip:acodec')),
- 'filesize': media_content.get('filesize'),
- 'width': int_or_none(media_content.get('width')),
- 'height': int_or_none(media_content.get('height')),
- })
- self._check_formats(formats, video_id)
- self._sort_formats(formats)
-
- subtitles = self.extract_subtitles(video_id, subtitles_urls)
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'timestamp': timestamp,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'duration': duration,
- 'thumbnail': thumbnail,
- 'categories': categories,
- 'formats': formats,
- 'subtitles': subtitles,
- }
-
- def _get_subtitles(self, video_id, subtitles_urls):
- subtitles = {}
- for lang, url in subtitles_urls.items():
- # For some weird reason, blip.tv serves a video instead of subtitles
- # when we request with a common UA
- req = sanitized_Request(url)
- req.add_header('User-Agent', 'youtube-dl')
- subtitles[lang] = [{
- # The extension is 'srt' but it's actually an 'ass' file
- 'ext': 'ass',
- 'data': self._download_webpage(req, None, note=False),
- }]
- return subtitles
-
-
-class BlipTVUserIE(InfoExtractor):
- _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
- _PAGE_SIZE = 12
- IE_NAME = 'blip.tv:user'
- _TEST = {
- 'url': 'http://blip.tv/actone',
- 'info_dict': {
- 'id': 'actone',
- 'title': 'Act One: The Series',
- },
- 'playlist_count': 5,
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- username = mobj.group(1)
-
- page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
-
- page = self._download_webpage(url, username, 'Downloading user page')
- mobj = re.search(r'data-users-id="([^"]+)"', page)
- page_base = page_base % mobj.group(1)
- title = self._og_search_title(page)
-
- # Download video ids using BlipTV Ajax calls. Result size per
- # query is limited (currently to 12 videos) so we need to query
- # page by page until there are no video ids - it means we got
- # all of them.
-
- video_ids = []
- pagenum = 1
-
- while True:
- url = page_base + "&page=" + str(pagenum)
- page = self._download_webpage(
- url, username, 'Downloading video ids from page %d' % pagenum)
-
- # Extract video identifiers
- ids_in_page = []
-
- for mobj in re.finditer(r'href="/([^"]+)"', page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(unescapeHTML(mobj.group(1)))
-
- video_ids.extend(ids_in_page)
-
- # A little optimization - if current page is not
- # "full", ie. does not contain PAGE_SIZE video ids then
- # we can assume that this page is the last one - there
- # are no more ids on further pages - no need to query
- # again.
-
- if len(ids_in_page) < self._PAGE_SIZE:
- break
-
- pagenum += 1
-
- urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
- url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
- return self.playlist_result(
- url_entries, playlist_title=title, playlist_id=username)
class BloombergIE(InfoExtractor):
- _VALID_URL = r'https?://www\.bloomberg\.com/news/[^/]+/[^/]+/(?P<id>[^/?#]+)'
+ _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
}, {
'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',
'only_matching': True,
+ }, {
+ 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',
+ 'only_matching': True,
}]
def _real_extract(self, url):
name = self._match_id(url)
webpage = self._download_webpage(url, name)
- video_id = self._search_regex(r'"bmmrId":"(.+?)"', webpage, 'id')
+ video_id = self._search_regex(
+ r'["\']bmmrId["\']\s*:\s*(["\'])(?P<url>.+?)\1',
+ webpage, 'id', group='url')
title = re.sub(': Video$', '', self._og_search_title(webpage))
embed_info = self._download_json(
'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
formats = []
for stream in embed_info['streams']:
- if stream["muxing_format"] == "TS":
- formats.extend(self._extract_m3u8_formats(stream['url'], video_id))
+ stream_url = stream.get('url')
+ if not stream_url:
+ continue
+ if stream['muxing_format'] == 'TS':
+ formats.extend(self._extract_m3u8_formats(
+ stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
- formats.extend(self._extract_f4m_formats(stream['url'], video_id))
+ formats.extend(self._extract_f4m_formats(
+ stream_url, video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
+from ..utils import (
+ js_to_json,
+ determine_ext,
+)
class BpbIE(InfoExtractor):
_TEST = {
'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
- 'md5': '0792086e8e2bfbac9cdf27835d5f2093',
+ # md5 fails in Python 2.6 due to buggy server response and wrong handling of urllib2
+ 'md5': 'c4f84c8a8044ca9ff68bb8441d300b3f',
'info_dict': {
'id': '297',
'ext': 'mp4',
title = self._html_search_regex(
r'<h2 class="white">(.*?)</h2>', webpage, 'title')
- video_url = self._html_search_regex(
- r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
- webpage, 'video URL')
+ video_info_dicts = re.findall(
+ r"({\s*src:\s*'http://film\.bpb\.de/[^}]+})", webpage)
+
+ formats = []
+ for video_info in video_info_dicts:
+ video_info = self._parse_json(video_info, video_id, transform_source=js_to_json)
+ quality = video_info['quality']
+ video_url = video_info['src']
+ formats.append({
+ 'url': video_url,
+ 'preference': 10 if quality == 'high' else 0,
+ 'format_note': quality,
+ 'format_id': '%s-%s' % (quality, determine_ext(video_url)),
+ })
+
+ self._sort_formats(formats)
return {
'id': video_id,
- 'url': video_url,
+ 'formats': formats,
'title': title,
'description': self._og_search_description(webpage),
}
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
+ xpath_element,
+ xpath_text,
)
class BRIE(InfoExtractor):
IE_DESC = 'Bayerischer Rundfunk Mediathek'
- _VALID_URL = r'https?://(?:www\.)?br\.de/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
- _BASE_URL = 'http://www.br.de'
+ _VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
_TESTS = [
{
'id': '48f656ef-287e-486f-be86-459122db22cc',
'ext': 'mp4',
'title': 'Die böse Überraschung',
- 'description': 'Betriebliche Altersvorsorge: Die böse Überraschung',
+ 'description': 'md5:ce9ac81b466ce775b8018f6801b48ac9',
'duration': 180,
'uploader': 'Reinhard Weber',
'upload_date': '20150422',
},
{
'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html',
- 'md5': 'a44396d73ab6a68a69a568fae10705bb',
+ 'md5': 'af3a3a4aa43ff0ce6a89504c67f427ef',
'info_dict': {
'id': 'a4b83e34-123d-4b81-9f4e-c0d3121a4e05',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Manfred Schreiber ist tot',
- 'description': 'Abendschau kompakt: Manfred Schreiber ist tot',
+ 'description': 'md5:b454d867f2a9fc524ebe88c3f5092d97',
'duration': 26,
}
},
{
- 'url': 'http://www.br.de/radio/br-klassik/sendungen/allegro/premiere-urauffuehrung-the-land-2015-dance-festival-muenchen-100.html',
+ 'url': 'https://www.br-klassik.de/audio/peeping-tom-premierenkritik-dance-festival-muenchen-100.html',
'md5': '8b5b27c0b090f3b35eac4ab3f7a73d3d',
'info_dict': {
'id': '74c603c9-26d3-48bb-b85b-079aeed66e0b',
'ext': 'aac',
'title': 'Kurzweilig und sehr bewegend',
- 'description': '"The Land" von Peeping Tom: Kurzweilig und sehr bewegend',
+ 'description': 'md5:0351996e3283d64adeb38ede91fac54e',
'duration': 296,
}
},
'id': '6ba73750-d405-45d3-861d-1ce8c524e059',
'ext': 'mp4',
'title': 'Umweltbewusster Häuslebauer',
- 'description': 'Uwe Erdelt: Umweltbewusster Häuslebauer',
+ 'description': 'md5:d52dae9792d00226348c1dbb13c9bae2',
'duration': 116,
}
},
'id': 'd982c9ce-8648-4753-b358-98abb8aec43d',
'ext': 'mp4',
'title': 'Folge 1 - Metaphysik',
- 'description': 'Kant für Anfänger: Folge 1 - Metaphysik',
+ 'description': 'md5:bb659990e9e59905c3d41e369db1fbe3',
'duration': 893,
'uploader': 'Eva Maria Steimle',
'upload_date': '20140117',
]
def _real_extract(self, url):
- display_id = self._match_id(url)
+ base_url, display_id = re.search(self._VALID_URL, url).groups()
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
- xml = self._download_xml(self._BASE_URL + xml_url, None)
+ xml = self._download_xml(base_url + xml_url, display_id)
medias = []
for xml_media in xml.findall('video') + xml.findall('audio'):
+ media_id = xml_media.get('externalId')
media = {
- 'id': xml_media.get('externalId'),
- 'title': xml_media.find('title').text,
- 'duration': parse_duration(xml_media.find('duration').text),
- 'formats': self._extract_formats(xml_media.find('assets')),
- 'thumbnails': self._extract_thumbnails(xml_media.find('teaserImage/variants')),
- 'description': ' '.join(xml_media.find('shareTitle').text.splitlines()),
- 'webpage_url': xml_media.find('permalink').text
+ 'id': media_id,
+ 'title': xpath_text(xml_media, 'title', 'title', True),
+ 'duration': parse_duration(xpath_text(xml_media, 'duration')),
+ 'formats': self._extract_formats(xpath_element(
+ xml_media, 'assets'), media_id),
+ 'thumbnails': self._extract_thumbnails(xpath_element(
+ xml_media, 'teaserImage/variants'), base_url),
+ 'description': xpath_text(xml_media, 'desc'),
+ 'webpage_url': xpath_text(xml_media, 'permalink'),
+ 'uploader': xpath_text(xml_media, 'author'),
}
- if xml_media.find('author').text:
- media['uploader'] = xml_media.find('author').text
- if xml_media.find('broadcastDate').text:
- media['upload_date'] = ''.join(reversed(xml_media.find('broadcastDate').text.split('.')))
+ broadcast_date = xpath_text(xml_media, 'broadcastDate')
+ if broadcast_date:
+ media['upload_date'] = ''.join(reversed(broadcast_date.split('.')))
medias.append(media)
if not medias:
raise ExtractorError('No media entries found')
return medias[0]
- def _extract_formats(self, assets):
-
- def text_or_none(asset, tag):
- elem = asset.find(tag)
- return None if elem is None else elem.text
-
- formats = [{
- 'url': text_or_none(asset, 'downloadUrl'),
- 'ext': text_or_none(asset, 'mediaType'),
- 'format_id': asset.get('type'),
- 'width': int_or_none(text_or_none(asset, 'frameWidth')),
- 'height': int_or_none(text_or_none(asset, 'frameHeight')),
- 'tbr': int_or_none(text_or_none(asset, 'bitrateVideo')),
- 'abr': int_or_none(text_or_none(asset, 'bitrateAudio')),
- 'vcodec': text_or_none(asset, 'codecVideo'),
- 'acodec': text_or_none(asset, 'codecAudio'),
- 'container': text_or_none(asset, 'mediaType'),
- 'filesize': int_or_none(text_or_none(asset, 'size')),
- } for asset in assets.findall('asset')
- if asset.find('downloadUrl') is not None]
-
+ def _extract_formats(self, assets, media_id):
+ formats = []
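+ # HDS/HLS assets expand into manifest formats; other assets yield an HTTP download and, when a serverPrefix is present, an RTMP variant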
+ for asset in assets.findall('asset'):
+ format_url = xpath_text(asset, ['downloadUrl', 'url'])
+ asset_type = asset.get('type')
+ if asset_type == 'HDS':
+ formats.extend(self._extract_f4m_formats(
+ format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False))
+ elif asset_type == 'HLS':
+ formats.extend(self._extract_m3u8_formats(
+ format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+ else:
+ format_info = {
+ 'ext': xpath_text(asset, 'mediaType'),
+ 'width': int_or_none(xpath_text(asset, 'frameWidth')),
+ 'height': int_or_none(xpath_text(asset, 'frameHeight')),
+ 'tbr': int_or_none(xpath_text(asset, 'bitrateVideo')),
+ 'abr': int_or_none(xpath_text(asset, 'bitrateAudio')),
+ 'vcodec': xpath_text(asset, 'codecVideo'),
+ 'acodec': xpath_text(asset, 'codecAudio'),
+ 'container': xpath_text(asset, 'mediaType'),
+ 'filesize': int_or_none(xpath_text(asset, 'size')),
+ }
+ format_url = self._proto_relative_url(format_url)
+ if format_url:
+ http_format_info = format_info.copy()
+ http_format_info.update({
+ 'url': format_url,
+ 'format_id': 'http-%s' % asset_type,
+ })
+ formats.append(http_format_info)
+ server_prefix = xpath_text(asset, 'serverPrefix')
+ if server_prefix:
+ rtmp_format_info = format_info.copy()
+ rtmp_format_info.update({
+ 'url': server_prefix,
+ 'play_path': xpath_text(asset, 'fileName'),
+ 'format_id': 'rtmp-%s' % asset_type,
+ })
+ formats.append(rtmp_format_info)
self._sort_formats(formats)
return formats
- def _extract_thumbnails(self, variants):
+ def _extract_thumbnails(self, variants, base_url):
thumbnails = [{
- 'url': self._BASE_URL + variant.find('url').text,
- 'width': int_or_none(variant.find('width').text),
- 'height': int_or_none(variant.find('height').text),
- } for variant in variants.findall('variant')]
+ 'url': base_url + xpath_text(variant, 'url'),
+ 'width': int_or_none(xpath_text(variant, 'width')),
+ 'height': int_or_none(xpath_text(variant, 'height')),
+ } for variant in variants.findall('variant') if xpath_text(variant, 'url')]
thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
return thumbnails
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
- _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+)'
+ _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'params': {
'skip_download': True,
}
+ }, {
+ # ref: prefixed video id
+ 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
+ 'only_matching': True,
}]
+ @staticmethod
+ def _extract_url(webpage):
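+ # convenience wrapper around _extract_urls that returns the first embedded player URL, if any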
+ urls = BrightcoveNewIE._extract_urls(webpage)
+ return urls[0] if urls else None
+
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
- # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript)
+ # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
+ # 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
for video_id, account_id, player_id, embed in re.findall(
# According to examples from [3] it's unclear whether video id
# may be optional and what to do when it is
+ # According to [4] data-video-id may be prefixed with ref:
r'''(?sx)
<video[^>]+
- data-video-id=["\'](\d+)["\'][^>]*>.*?
+ data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*?
</video>.*?
<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
if source_type == 'application/x-mpegURL':
if not src:
continue
- m3u8_formats = self._extract_m3u8_formats(
+ formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
- m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ m3u8_id='hls', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
'info_dict': {
'id': 'studio-c-season-5-episode-5',
'ext': 'mp4',
- 'description': 'md5:5438d33774b6bdc662f9485a340401cc',
+ 'description': 'md5:e07269172baff037f8e8bf9956bc9747',
'title': 'Season 5 Episode 5',
- 'thumbnail': 're:^https?://.*\.jpg$'
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 1486.486,
},
'params': {
'skip_download': True,
+++ /dev/null
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-
-
-class Canal13clIE(InfoExtractor):
- _VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
- _TEST = {
- 'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
- 'md5': '4cb1fa38adcad8fea88487a078831755',
- 'info_dict': {
- 'id': '1403022125',
- 'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
- 'ext': 'mp4',
- 'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda',
- 'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.',
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
-
- webpage = self._download_webpage(url, display_id)
-
- title = self._html_search_meta(
- 'twitter:title', webpage, 'title', fatal=True)
- description = self._html_search_meta(
- 'twitter:description', webpage, 'description')
- url = self._html_search_regex(
- r'articuloVideo = \"(.*?)\"', webpage, 'url')
- real_id = self._search_regex(
- r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id)
- thumbnail = self._html_search_regex(
- r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail')
-
- return {
- 'id': real_id,
- 'display_id': display_id,
- 'url': url,
- 'title': title,
- 'description': description,
- 'ext': 'mp4',
- 'thumbnail': thumbnail,
- }
class Canalc2IE(InfoExtractor):
IE_NAME = 'canalc2.tv'
- _VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:(?:www\.)?canalc2\.tv/video/|archives-canalc2\.u-strasbg\.fr/video\.asp\?.*\bidVideo=)(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.canalc2.tv/video/12163',
'md5': '060158428b650f896c542dfbb3d6487f',
'info_dict': {
'params': {
'skip_download': True, # Requires rtmpdump
}
- }
+ }, {
+ 'url': 'http://archives-canalc2.u-strasbg.fr/video.asp?idVideo=11427&voir=oui',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_url = self._search_regex(
- r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2',
- webpage, 'video_url', group='file')
- formats = [{'url': video_url}]
- if video_url.startswith('rtmp://'):
- rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
- formats[0].update({
- 'url': rtmp.group('url'),
- 'ext': 'flv',
- 'app': rtmp.group('app'),
- 'play_path': rtmp.group('play_path'),
- 'page_url': url,
- })
+
+ webpage = self._download_webpage(
+ 'http://www.canalc2.tv/video/%s' % video_id, video_id)
+
+ formats = []
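+ # the page may declare several file= sources; RTMP URLs are split into app/play_path for rtmpdump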
+ for _, video_url in re.findall(r'file\s*=\s*(["\'])(.+?)\1', webpage):
+ if video_url.startswith('rtmp://'):
+ rtmp = re.search(
+ r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
+ formats.append({
+ 'url': rtmp.group('url'),
+ 'format_id': 'rtmp',
+ 'ext': 'flv',
+ 'app': rtmp.group('app'),
+ 'play_path': rtmp.group('play_path'),
+ 'page_url': url,
+ })
+ else:
+ formats.append({
+ 'url': video_url,
+ 'format_id': 'http',
+ })
+ self._sort_formats(formats)
title = self._html_search_regex(
r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title')
unified_strdate,
url_basename,
qualities,
+ int_or_none,
)
class CanalplusIE(InfoExtractor):
IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
_VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv|itele\.fr)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
- _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
+ _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s?format=json'
_SITE_ID_MAP = {
'canalplus.fr': 'cplus',
'piwiplus.fr': 'teletoon',
_TESTS = [{
'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1263092',
- 'md5': 'b3481d7ca972f61e37420798d0a9d934',
+ 'md5': '12164a6f14ff6df8bd628e8ba9b10b78',
'info_dict': {
'id': '1263092',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Le Zapping - 13/05/15',
'description': 'md5:09738c0d06be4b5d06a0940edb0da73f',
'upload_date': '20150513',
'skip': 'videos get deleted after a while',
}, {
'url': 'http://www.itele.fr/france/video/aubervilliers-un-lycee-en-colere-111559',
- 'md5': 'f3a46edcdf28006598ffaf5b30e6a2d4',
+ 'md5': '38b8f7934def74f0d6f3ba6c036a5f82',
'info_dict': {
'id': '1213714',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Aubervilliers : un lycée en colère - Le 11/02/2015 à 06h45',
'description': 'md5:8216206ec53426ea6321321f3b3c16db',
'upload_date': '20150211',
webpage, 'video id', group='id')
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
- doc = self._download_xml(info_url, video_id, 'Downloading video XML')
+ video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
- video_info = [video for video in doc if video.find('ID').text == video_id][0]
- media = video_info.find('MEDIA')
- infos = video_info.find('INFOS')
+ if isinstance(video_data, list):
+ video_data = [video for video in video_data if video.get('ID') == video_id][0]
+ media = video_data['MEDIA']
+ infos = video_data['INFOS']
- preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
+ preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD'])
- fmt_url = next(iter(media.find('VIDEOS'))).text
+ fmt_url = next(iter(media['VIDEOS'].values()))
if '/geo' in fmt_url.lower():
response = self._request_webpage(
HEADRequest(fmt_url), video_id,
expected=True)
formats = []
- for fmt in media.find('VIDEOS'):
- format_url = fmt.text
+ for format_id, format_url in media['VIDEOS'].items():
if not format_url:
continue
- format_id = fmt.tag
if format_id == 'HLS':
formats.extend(self._extract_m3u8_formats(
- format_url, video_id, 'mp4', preference=preference(format_id)))
+ format_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
elif format_id == 'HDS':
formats.extend(self._extract_f4m_formats(
- format_url + '?hdcore=2.11.3', video_id, preference=preference(format_id)))
+ format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False))
else:
formats.append({
- 'url': format_url,
+ # the secret extracted from ya function in http://player.canalplus.fr/common/js/canalPlayer.js
+ 'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes',
'format_id': format_id,
'preference': preference(format_id),
})
self._sort_formats(formats)
+ thumbnails = [{
+ 'id': image_id,
+ 'url': image_url,
+ } for image_id, image_url in media.get('images', {}).items()]
+
+ titrage = infos['TITRAGE']
+
return {
'id': video_id,
'display_id': display_id,
- 'title': '%s - %s' % (infos.find('TITRAGE/TITRE').text,
- infos.find('TITRAGE/SOUS_TITRE').text),
- 'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
- 'thumbnail': media.find('IMAGES/GRAND').text,
- 'description': infos.find('DESCRIPTION').text,
- 'view_count': int(infos.find('NB_VUES').text),
- 'like_count': int(infos.find('NB_LIKES').text),
- 'comment_count': int(infos.find('NB_COMMENTS').text),
+ 'title': '%s - %s' % (titrage['TITRE'],
+ titrage['SOUS_TITRE']),
+ 'upload_date': unified_strdate(infos.get('PUBLICATION', {}).get('DATE')),
+ 'thumbnails': thumbnails,
+ 'description': infos.get('DESCRIPTION'),
+ 'duration': int_or_none(infos.get('DURATION')),
+ 'view_count': int_or_none(infos.get('NB_VUES')),
+ 'like_count': int_or_none(infos.get('NB_LIKES')),
+ 'comment_count': int_or_none(infos.get('NB_COMMENTS')),
'formats': formats,
}
--- /dev/null
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import float_or_none
+
+
+class CanvasIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?canvas\.be/video/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+ _TESTS = [{
+ 'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
+ 'md5': 'ea838375a547ac787d4064d8c7860a6c',
+ 'info_dict': {
+ 'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
+ 'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
+ 'ext': 'mp4',
+ 'title': 'De afspraak veilt voor de Warmste Week',
+ 'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 49.02,
+ }
+ }, {
+ # with subtitles
+ 'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
+ 'info_dict': {
+ 'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
+ 'display_id': 'pieter-0167',
+ 'ext': 'mp4',
+ 'title': 'Pieter 0167',
+ 'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 2553.08,
+ 'subtitles': {
+ 'nl': [{
+ 'ext': 'vtt',
+ }],
+ },
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._search_regex(
+ r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
+ webpage, 'title', default=None) or self._og_search_title(webpage)
+
+ video_id = self._html_search_regex(
+ r'data-video=(["\'])(?P<id>.+?)\1', webpage, 'video id', group='id')
+
+ data = self._download_json(
+ 'https://mediazone.vrt.be/api/v1/canvas/assets/%s' % video_id, display_id)
+
+ formats = []
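+ # targetUrls mixes manifest types (HLS/HDS) with direct progressive URLs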
+ for target in data['targetUrls']:
+ format_url, format_type = target.get('url'), target.get('type')
+ if not format_url or not format_type:
+ continue
+ if format_type == 'HLS':
+ formats.extend(self._extract_m3u8_formats(
+ format_url, display_id, entry_protocol='m3u8_native',
+ ext='mp4', preference=0, fatal=False, m3u8_id=format_type))
+ elif format_type == 'HDS':
+ formats.extend(self._extract_f4m_formats(
+ format_url, display_id, f4m_id=format_type, fatal=False))
+ else:
+ formats.append({
+ 'format_id': format_type,
+ 'url': format_url,
+ })
+ self._sort_formats(formats)
+
+ subtitles = {}
+ subtitle_urls = data.get('subtitleUrls')
+ if isinstance(subtitle_urls, list):
+ for subtitle in subtitle_urls:
+ subtitle_url = subtitle.get('url')
+ if subtitle_url and subtitle.get('type') == 'CLOSED':
+ subtitles.setdefault('nl', []).append({'url': subtitle_url})
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': self._og_search_description(webpage),
+ 'formats': formats,
+ 'duration': float_or_none(data.get('duration'), 1000),
+ 'thumbnail': data.get('posterImageUrl'),
+ 'subtitles': subtitles,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class CBCIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?cbc\.ca/(?:[^/]+/)+(?P<id>[^/?#]+)'
+ _TESTS = [{
+ # with mediaId
+ 'url': 'http://www.cbc.ca/22minutes/videos/clips-season-23/don-cherry-play-offs',
+ 'info_dict': {
+ 'id': '2682904050',
+ 'ext': 'flv',
+ 'title': 'Don Cherry – All-Stars',
+ 'description': 'Don Cherry has a bee in his bonnet about AHL player John Scott because that guy’s got heart.',
+ 'timestamp': 1454475540,
+ 'upload_date': '20160203',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # with clipId
+ 'url': 'http://www.cbc.ca/archives/entry/1978-robin-williams-freestyles-on-90-minutes-live',
+ 'info_dict': {
+ 'id': '2487345465',
+ 'ext': 'flv',
+ 'title': 'Robin Williams freestyles on 90 Minutes Live',
+ 'description': 'Wacky American comedian Robin Williams shows off his infamous "freestyle" comedic talents while being interviewed on CBC\'s 90 Minutes Live.',
+ 'upload_date': '19700101',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # multiple iframes
+ 'url': 'http://www.cbc.ca/natureofthings/blog/birds-eye-view-from-vancouvers-burrard-street-bridge-how-we-got-the-shot',
+ 'playlist': [{
+ 'info_dict': {
+ 'id': '2680832926',
+ 'ext': 'flv',
+ 'title': 'An Eagle\'s-Eye View Off Burrard Bridge',
+ 'description': 'Hercules the eagle flies from Vancouver\'s Burrard Bridge down to a nearby park with a mini-camera strapped to his back.',
+ 'upload_date': '19700101',
+ },
+ }, {
+ 'info_dict': {
+ 'id': '2658915080',
+ 'ext': 'flv',
+ 'title': 'Fly like an eagle!',
+ 'description': 'Eagle equipped with a mini camera flies from the world\'s tallest tower',
+ 'upload_date': '19700101',
+ },
+ }],
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if CBCPlayerIE.suitable(url) else super(CBCIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ player_init = self._search_regex(
+ r'CBC\.APP\.Caffeine\.initInstance\(({.+?})\);', webpage, 'player init',
+ default=None)
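+ # the player init blob carries either a mediaId or a clipId; the latter is resolved to a mediaId through thePlatform's feed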
+ if player_init:
+ player_info = self._parse_json(player_init, display_id, js_to_json)
+ media_id = player_info.get('mediaId')
+ if not media_id:
+ clip_id = player_info['clipId']
+ media_id = self._download_json(
+ 'http://feed.theplatform.com/f/h9dtGB/punlNGjMlc1F?fields=id&byContent=byReleases%3DbyId%253D' + clip_id,
+ clip_id)['entries'][0]['id'].split('/')[-1]
+ return self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id)
+ else:
+ entries = [self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id) for media_id in re.findall(r'<iframe[^>]+src="[^"]+?mediaId=(\d+)"', webpage)]
+ return self.playlist_result(entries)
+
+
+class CBCPlayerIE(InfoExtractor):
+ _VALID_URL = r'(?:cbcplayer:|https?://(?:www\.)?cbc\.ca/(?:player/play/|i/caffeine/syndicate/\?mediaId=))(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://www.cbc.ca/player/play/2683190193',
+ 'info_dict': {
+ 'id': '2683190193',
+ 'ext': 'flv',
+ 'title': 'Gerry Runs a Sweat Shop',
+ 'description': 'md5:b457e1c01e8ff408d9d801c1c2cd29b0',
+ 'timestamp': 1455067800,
+ 'upload_date': '20160210',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
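+ # delegate format and metadata extraction to the generic thePlatform feed extractor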
+ return self.url_result(
+ 'http://feed.theplatform.com/f/ExhSPC/vms_5akSXx4Ng_Zn?byGuid=%s' % video_id,
+ 'ThePlatformFeed', video_id)
# encoding: utf-8
from __future__ import unicode_literals
-import re
-import json
-
from .common import InfoExtractor
+from .theplatform import ThePlatformIE
+from ..utils import (
+ parse_duration,
+ find_xpath_attr,
+)
-class CBSNewsIE(InfoExtractor):
+class CBSNewsIE(ThePlatformIE):
IE_DESC = 'CBS News'
- _VALID_URL = r'http://(?:www\.)?cbsnews\.com/(?:[^/]+/)+(?P<id>[\da-z_-]+)'
+ _VALID_URL = r'http://(?:www\.)?cbsnews\.com/(?:news|videos)/(?P<id>[\da-z_-]+)'
_TESTS = [
{
'url': 'http://www.cbsnews.com/videos/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
'info_dict': {
'id': 'fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 205,
+ 'subtitles': {
+ 'en': [{
+ 'ext': 'ttml',
+ }],
+ },
},
'params': {
- # rtmp download
+ # m3u8 download
'skip_download': True,
},
},
]
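+ # override the base SMIL subtitle parsing: CBS News exposes a single English TTML track via a ClosedCaptionURL param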
+ def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
+ closed_caption_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', 'ClosedCaptionURL')
+ return {
+ 'en': [{
+ 'ext': 'ttml',
+ 'url': closed_caption_e.attrib['value'],
+ }]
+ } if closed_caption_e is not None and closed_caption_e.attrib.get('value') else {}
+
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- video_info = json.loads(self._html_search_regex(
+ video_info = self._parse_json(self._html_search_regex(
r'(?:<ul class="media-list items" id="media-related-items"><li data-video-info|<div id="cbsNewsVideoPlayer" data-video-player-options)=\'({.+?})\'',
- webpage, 'video JSON info'))
+ webpage, 'video JSON info'), video_id)
item = video_info['item'] if 'item' in video_info else video_info
title = item.get('articleTitle') or item.get('hed')
duration = item.get('duration')
thumbnail = item.get('mediaImage') or item.get('thumbnail')
+ subtitles = {}
formats = []
for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
- uri = item.get('media' + format_id + 'URI')
- if not uri:
+ pid = item.get('media' + format_id)
+ if not pid:
continue
- fmt = {
- 'url': uri,
- 'format_id': format_id,
- }
- if uri.startswith('rtmp'):
- play_path = re.sub(
- r'{slistFilePath}', '',
- uri.split('<break>')[-1].split('{break}')[-1])
- fmt.update({
- 'app': 'ondemand?auth=cbs',
- 'play_path': 'mp4:' + play_path,
- 'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf',
- 'page_url': 'http://www.cbsnews.com',
- 'ext': 'flv',
- })
- elif uri.endswith('.m3u8'):
- fmt['ext'] = 'mp4'
- formats.append(fmt)
+ release_url = 'http://link.theplatform.com/s/dJ5BDC/%s?format=SMIL&mbr=true' % pid
+ tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % pid)
+ formats.extend(tp_formats)
+ subtitles = self._merge_subtitles(subtitles, tp_subtitles)
+ self._sort_formats(formats)
return {
'id': video_id,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+
+class CBSNewsLiveVideoIE(InfoExtractor):
+ IE_DESC = 'CBS News Live Videos'
+ _VALID_URL = r'http://(?:www\.)?cbsnews\.com/live/video/(?P<id>[\da-z_-]+)'
+
+ _TEST = {
+ 'url': 'http://www.cbsnews.com/live/video/clinton-sanders-prepare-to-face-off-in-nh/',
+ 'info_dict': {
+ 'id': 'clinton-sanders-prepare-to-face-off-in-nh',
+ 'ext': 'flv',
+ 'title': 'Clinton, Sanders Prepare To Face Off In NH',
+ 'duration': 334,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_info = self._parse_json(self._html_search_regex(
+ r'data-story-obj=\'({.+?})\'', webpage, 'video JSON info'), video_id)['story']
+
+ hdcore_sign = 'hdcore=3.3.1'
+ f4m_formats = self._extract_f4m_formats(video_info['url'] + '&' + hdcore_sign, video_id)
+ if f4m_formats:
+ for entry in f4m_formats:
+ # URLs without the extra param induce a 404 error
+ entry.update({'extra_param_to_segment_url': hdcore_sign})
+
+ return {
+ 'id': video_id,
+ 'title': video_info['headline'],
+ 'thumbnail': video_info.get('thumbnail_url_hd') or video_info.get('thumbnail_url_sd'),
+ 'duration': parse_duration(video_info.get('segmentDur')),
+ 'formats': f4m_formats,
}
from .common import InfoExtractor
from ..utils import (
int_or_none,
+ parse_duration,
qualities,
unified_strdate,
)
class CCCIE(InfoExtractor):
IE_NAME = 'media.ccc.de'
- _VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/[^?#]+/[^?#/]*?_(?P<id>[0-9]{8,})._[^?#/]*\.html'
+ _VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/v/(?P<id>[^/?#&]+)'
- _TEST = {
- 'url': 'http://media.ccc.de/browse/congress/2013/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor.html#video',
+ _TESTS = [{
+ 'url': 'https://media.ccc.de/v/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor#video',
'md5': '3a1eda8f3a29515d27f5adb967d7e740',
'info_dict': {
- 'id': '20131228183',
+ 'id': '30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor',
'ext': 'mp4',
'title': 'Introduction to Processor Design',
- 'description': 'md5:5ddbf8c734800267f2cee4eab187bc1b',
+ 'description': 'md5:80be298773966f66d56cb11260b879af',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
- 'upload_date': '20131229',
+ 'upload_date': '20131228',
+ 'duration': 3660,
}
- }
+ }, {
+ 'url': 'https://media.ccc.de/v/32c3-7368-shopshifting#download',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
title = self._html_search_regex(
r'(?s)<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
- r"(?s)<p class='description'>(.*?)</p>",
+ r'(?s)<h3>About</h3>(.+?)<h3>',
webpage, 'description', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
- r"(?s)<span class='[^']*fa-calendar-o'></span>(.*?)</li>",
+ r"(?s)<span[^>]+class='[^']*fa-calendar-o'[^>]*>(.+?)</span>",
webpage, 'upload date', fatal=False))
view_count = int_or_none(self._html_search_regex(
r"(?s)<span class='[^']*fa-eye'></span>(.*?)</li>",
webpage, 'view count', fatal=False))
+ duration = parse_duration(self._html_search_regex(
+ r'(?s)<span[^>]+class=(["\']).*?fa-clock-o.*?\1[^>]*></span>(?P<duration>.+?)</li',
+ webpage, 'duration', fatal=False, group='duration'))
matches = re.finditer(r'''(?xs)
- <(?:span|div)\s+class='label\s+filetype'>(?P<format>.*?)</(?:span|div)>\s*
+ <(?:span|div)\s+class='label\s+filetype'>(?P<format>[^<]*)</(?:span|div)>\s*
+ <(?:span|div)\s+class='label\s+filetype'>(?P<lang>[^<]*)</(?:span|div)>\s*
<a\s+download\s+href='(?P<http_url>[^']+)'>\s*
(?:
.*?
- <a\s+href='(?P<torrent_url>[^']+\.torrent)'
+ <a\s+(?:download\s+)?href='(?P<torrent_url>[^']+\.torrent)'
)?''', webpage)
formats = []
for m in matches:
format_id = self._search_regex(
r'.*/([a-z0-9_-]+)/[^/]*$',
m.group('http_url'), 'format id', default=None)
+ if format_id:
+ format_id = m.group('lang') + '-' + format_id
vcodec = 'h264' if 'h264' in format_id else (
'none' if format_id in ('mp3', 'opus') else None
)
formats.append({
'format_id': format_id,
'format': format,
+ 'language': m.group('lang'),
'url': m.group('http_url'),
'vcodec': vcodec,
'preference': preference(format_id),
'thumbnail': thumbnail,
'view_count': view_count,
'upload_date': upload_date,
+ 'duration': duration,
'formats': formats,
}
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
- return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
+ return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
- m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
+ m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
- yield "{0} --> {1}".format(start, stop)
+ yield '{0} --> {1}'.format(start, stop)
else:
yield line
- return "\r\n".join(_fix_subtitle(subtitles))
+ return '\r\n'.join(_fix_subtitle(subtitles))
'only_matching': True,
}]
+ _ROOM_OFFLINE = 'Room is currently offline'
+
def _real_extract(self, url):
video_id = self._match_id(url)
if not m3u8_url:
error = self._search_regex(
- r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
- webpage, 'error', group='error')
- raise ExtractorError(error, expected=True)
+ [r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
+ r'<div[^>]+id=(["\'])defchat\1[^>]*>\s*<p><strong>(?P<error>[^<]+)<'],
+ webpage, 'error', group='error', default=None)
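+ # no explicit error message found: infer an offline room from the page markers checked below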
+ if not error:
+ if any(p in webpage for p in (
+ self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')):
+ error = self._ROOM_OFFLINE
+ if error:
+ raise ExtractorError(error, expected=True)
+ raise ExtractorError('Unable to find stream URL')
formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
from .common import InfoExtractor
from ..utils import ExtractorError
-from .bliptv import BlipTVIE
from .screenwavemedia import ScreenwaveMediaIE
},
},
{
- # blip.tv embedded video
+ # Youtube embedded video
'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/',
- 'md5': 'ca9b3c8dd5a66f9375daeb5135f5a3de',
+ 'md5': 'df4cf8a1dcedaec79a73d96d83b99023',
'info_dict': {
- 'id': '4065369',
- 'ext': 'flv',
+ 'id': 'OEVzPCY2T-g',
+ 'ext': 'mp4',
'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles',
'upload_date': '20061207',
- 'uploader': 'cinemassacre',
- 'uploader_id': '250778',
- 'timestamp': 1283233867,
- 'description': 'md5:0a108c78d130676b207d0f6d029ecffd',
+ 'uploader': 'Cinemassacre',
+ 'uploader_id': 'JamesNintendoNerd',
+ 'description': 'md5:784734696c2b8b7f4b8625cc799e07f6',
}
},
{
r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
],
webpage, 'player data URL', default=None, group='url')
- if not playerdata_url:
- playerdata_url = BlipTVIE._extract_url(webpage)
if not playerdata_url:
raise ExtractorError('Unable to find player data')
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
- determine_ext,
int_or_none,
- js_to_json,
- parse_iso8601,
- remove_end,
+ unified_strdate,
)
'id': '3966754',
'ext': 'mp4',
'title': 'FIFA 14 - E3 2013 Trailer',
- 'timestamp': 1370938118,
+ 'description': 'Video zu FIFA 14: E3 2013 Trailer',
'upload_date': '20130611',
'duration': 82,
+ 'view_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- video_info = self._parse_json(
- js_to_json(self._html_search_regex(
- '(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')),
- video_id)
+ video_info = self._download_json(
+ 'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id,
+ video_id)['items'][0]
formats = []
- for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage):
- ext = determine_ext(video_url)
- if ext == 'm3u8':
- formats.append({
- 'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
- 'ext': 'mp4',
- 'format_id': 'hls',
- })
- else:
- formats.append({
- 'url': video_url,
- 'format_id': ext,
- })
- self._sort_formats(formats)
- title = remove_end(self._og_search_title(webpage), ' - Video')
- thumbnail = self._og_search_thumbnail(webpage)
- duration = int_or_none(video_info.get('length'))
- timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date'))
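+ # the devapi response advertises an HLS manifest and/or a progressive MP4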
+ m3u8_url = video_info.get('media_videourl_hls')
+ if m3u8_url:
+ formats.append({
+ 'url': m3u8_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
+ 'ext': 'mp4',
+ 'format_id': 'hls',
+ })
+
+ mp4_url = video_info.get('media_videourl')
+ if mp4_url:
+ formats.append({
+ 'url': mp4_url,
+ 'format_id': 'mp4',
+ 'width': int_or_none(video_info.get('width')),
+ 'height': int_or_none(video_info.get('height')),
+ 'tbr': int_or_none(video_info.get('bitrate')),
+ })
return {
'id': video_id,
- 'title': title,
+ 'title': video_info['title'],
+ 'description': video_info.get('descr'),
'formats': formats,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'timestamp': timestamp,
+ 'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'),
+ 'duration': int_or_none(video_info.get('media_length')),
+ 'upload_date': unified_strdate(video_info.get('pubDate')),
+ 'view_count': int_or_none(video_info.get('media_views'))
}
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import determine_ext
+from ..utils import int_or_none
_translation_table = {
video_title = self._search_regex(
r'mediaTitle = "([^"]+)"', webpage, 'title')
- fmts = {}
- for fmt in ('mp4', 'flv'):
- fmt_list = self._parse_json(self._search_regex(
- r'var %sjson\s*=\s*(\[.*?\]);' % fmt, webpage, '%s formats' % fmt), video_id)
- for f in fmt_list:
- fmts[f['fname']] = _decode(f['sUrl'])
-
- qualities = self._parse_json(self._search_regex(
- r'var player_btns\s*=\s*(.*?);\n', webpage, 'quality info'), video_id)
+ gexo_files = self._parse_json(
+ self._search_regex(
+ r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'),
+ video_id)
formats = []
- for fname, url in fmts.items():
- f = {
- 'url': url,
- }
- if fname in qualities:
- qual = qualities[fname]
- f.update({
- 'format_id': '%s_%sp' % (determine_ext(url), qual['h']),
- 'width': qual['w'],
- 'height': qual['h'],
- 'tbr': qual['br'],
- })
- formats.append(f)
-
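+ # gexoFiles maps format ids to obfuscated URLs (unscrambled via _decode) plus dimensions and bitrate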
+ for format_id, f in gexo_files.items():
+ video_url = f.get('url')
+ if not video_url:
+ continue
+ fmt = f.get('fmt')
+ height = f.get('h')
+ format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id
+ formats.append({
+ 'url': _decode(video_url),
+ 'format_id': format_id,
+ 'width': int_or_none(f.get('w')),
+ 'height': int_or_none(height),
+ 'tbr': int_or_none(f.get('br')),
+ })
self._sort_formats(formats)
thumbnail = self._search_regex(
# coding: utf-8
from __future__ import unicode_literals
-import json
+from .theplatform import ThePlatformIE
+from ..utils import int_or_none
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
-
-class CNETIE(InfoExtractor):
+class CNETIE(ThePlatformIE):
_VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
'ext': 'flv',
'title': 'Hands-on with Microsoft Windows 8.1 Update',
'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
- 'thumbnail': 're:^http://.*/flmswindows8.jpg$',
'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
'uploader': 'Sarah Mitroff',
+ 'duration': 70,
},
- 'params': {
- 'skip_download': 'requires rtmpdump',
- }
}, {
'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
'info_dict': {
'id': '56527b93-d25d-44e3-b738-f989ce2e49ba',
'ext': 'flv',
+ 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole',
'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
'uploader': 'Ashley Esqueda',
- 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
- },
- 'params': {
- 'skip_download': True, # requires rtmpdump
+ 'duration': 1482,
},
}]
webpage = self._download_webpage(url, display_id)
data_json = self._html_search_regex(
- r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
+ r"data-cnet-video(?:-uvp)?-options='([^']+)'",
webpage, 'data json')
- data = json.loads(data_json)
- vdata = data['video']
- if not vdata:
- vdata = data['videos'][0]
- if not vdata:
- raise ExtractorError('Cannot find video data')
-
- mpx_account = data['config']['players']['default']['mpx_account']
- vid = vdata['files'].get('rtmp', vdata['files']['hds'])
- tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
+ data = self._parse_json(data_json, display_id)
+ vdata = data.get('video') or data['videos'][0]
video_id = vdata['id']
- title = vdata.get('headline')
- if title is None:
- title = vdata.get('title')
- if title is None:
- raise ExtractorError('Cannot find title!')
- thumbnail = vdata.get('image', {}).get('path')
+ title = vdata['title']
author = vdata.get('author')
if author:
uploader = '%s %s' % (author['firstName'], author['lastName'])
uploader = None
uploader_id = None
+ mpx_account = data['config']['uvpConfig']['default']['mpx_account']
+
+ metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id)
+ description = vdata.get('description') or metadata.get('description')
+ duration = int_or_none(vdata.get('duration')) or metadata.get('duration')
+
+ formats = []
+ subtitles = {}
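+ # each entry in files is a thePlatform release id; skip the phone HLS variant when a tablet one exists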
+ for (fkey, vid) in vdata['files'].items():
+ if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']:
+ continue
+ release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid)
+ if fkey == 'hds':
+ release_url += '&manifest=f4m'
+ tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey)
+ formats.extend(tp_formats)
+ subtitles = self._merge_subtitles(subtitles, tp_subtitles)
+ self._sort_formats(formats)
+
return {
- '_type': 'url_transparent',
- 'url': tp_link,
'id': video_id,
'display_id': display_id,
'title': title,
+ 'description': description,
+ 'thumbnail': metadata.get('thumbnail'),
+ 'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
- 'thumbnail': thumbnail,
+ 'subtitles': subtitles,
+ 'formats': formats,
}
'upload_date': '20130609',
},
}, {
- "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
- "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
- "info_dict": {
+ 'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
+ 'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
+ 'info_dict': {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
- "title": "Student's epic speech stuns new freshmen",
- "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
- "upload_date": "20130821",
+ 'title': "Student's epic speech stuns new freshmen",
+ 'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
+ 'upload_date': '20130821',
}
}, {
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
video_id = self._match_id(url)
player_options_request = {
- "getPlayerOptionsRequest": {
- "ResourceId": video_id,
- "QueryString": "",
+ 'getPlayerOptionsRequest': {
+ 'ResourceId': video_id,
+ 'QueryString': '',
}
}
# encoding: utf-8
from __future__ import unicode_literals
-import json
-
from .common import InfoExtractor
-from ..utils import parse_iso8601
+from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ parse_duration,
+ parse_iso8601,
+)
class ComCarCoffIE(InfoExtractor):
_TESTS = [{
'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
'info_dict': {
- 'id': 'miranda-sings-happy-thanksgiving-miranda',
+ 'id': '2494164',
'ext': 'mp4',
'upload_date': '20141127',
'timestamp': 1417107600,
+ 'duration': 1232,
'title': 'Happy Thanksgiving Miranda',
'description': 'Jerry Seinfeld and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.',
- 'thumbnail': 'http://ccc.crackle.com/images/s5e4_thumb.jpg',
},
'params': {
'skip_download': 'requires ffmpeg',
display_id = 'comediansincarsgettingcoffee.com'
webpage = self._download_webpage(url, display_id)
- full_data = json.loads(self._search_regex(
- r'<script type="application/json" id="videoData">(?P<json>.+?)</script>',
- webpage, 'full data json'))
+ full_data = self._parse_json(
+ self._search_regex(
+ r'window\.app\s*=\s*({.+?});\n', webpage, 'full data json'),
+ display_id)['videoData']
- video_id = full_data['activeVideo']['video']
- video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
+ display_id = full_data['activeVideo']['video']
+ video_data = full_data.get('videos', {}).get(display_id) or full_data['singleshots'][display_id]
+ video_id = compat_str(video_data['mediaId'])
thumbnails = [{
'url': video_data['images']['thumb'],
}, {
'url': video_data['images']['poster'],
}]
- formats = self._extract_m3u8_formats(
- video_data['mediaUrl'], video_id, ext='mp4')
+
+ timestamp = int_or_none(video_data.get('pubDateTime')) or parse_iso8601(
+ video_data.get('pubDate'))
+ duration = int_or_none(video_data.get('durationSeconds')) or parse_duration(
+ video_data.get('duration'))
return {
+ '_type': 'url_transparent',
+ 'url': 'crackle:%s' % video_id,
'id': video_id,
'display_id': display_id,
'title': video_data['title'],
'description': video_data.get('description'),
- 'timestamp': parse_iso8601(video_data.get('pubDate')),
+ 'timestamp': timestamp,
+ 'duration': duration,
'thumbnails': thumbnails,
- 'formats': formats,
+ 'season_number': int_or_none(video_data.get('season')),
+ 'episode_number': int_or_none(video_data.get('episode')),
'webpage_url': 'http://comediansincarsgettingcoffee.com/%s' % (video_data.get('urlSlug', video_data.get('slug'))),
}
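A note on the url_transparent hand-off above: extraction is re-routed to CrackleIE (whose _VALID_URL accepts the crackle: scheme, as the new extractor later in this diff shows), while fields supplied alongside it take precedence over the delegate's output. A minimal sketch:

    result = {
        '_type': 'url_transparent',
        'url': 'crackle:2494164',  # resolved by CrackleIE
        # Fields below override/augment whatever CrackleIE extracts,
        # preserving metadata only available on the embedding page.
        'title': 'Happy Thanksgiving Miranda',
        'timestamp': 1417107600,
    }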
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
- (video-clips|episodes|cc-studios|video-collections|full-episodes)
+ (video-clips|episodes|cc-studios|video-collections|full-episodes|shows)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
- }
+ }, {
+ 'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/interviews/6yx39d/exclusive-rand-paul-extended-interview',
+ 'only_matching': True,
+ }]
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
- mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+ mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
import socket
import sys
import time
+import math
from ..compat import (
compat_cookiejar,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
- compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
compat_etree_fromstring,
clean_html,
compiled_regex_type,
determine_ext,
+ error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
+ parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
url_basename,
xpath_text,
xpath_with_ns,
+ determine_protocol,
+ parse_duration,
+ mimetype2ext,
)
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
- * language_preference Is this in the correct requested
- language?
+ * language Language code, e.g. "de" or "en-US".
+ * language_preference Is this in the language mentioned in
+ the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
- duration: Length of the video in seconds, as an integer.
+ duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
+ The following fields should only be used when the video belongs to some logical
+ chapter or section:
+
+ chapter: Name or title of the chapter the video belongs to.
+ chapter_number: Number of the chapter the video belongs to, as an integer.
+ chapter_id: Id of the chapter the video belongs to, as a unicode string.
+
+ The following fields should only be used when the video is an episode of some
+ series or programme:
+
+ series: Title of the series or programme the video episode belongs to.
+ season: Title of the season the video episode belongs to.
+ season_number: Number of the season the video episode belongs to, as an integer.
+ season_id: Id of the season the video episode belongs to, as a unicode string.
+ episode: Title of the video episode. Unlike mandatory video title field,
+ this field should denote the exact title of the video episode
+ without any kind of decoration.
+ episode_number: Number of the video episode within a season, as an integer.
+ episode_id: Id of the video episode, as a unicode string.
+
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
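A minimal sketch of an info dict using the new chapter and episode fields (all values hypothetical; set these keys only when they genuinely apply):

    info = {
        'id': '12345',
        'title': 'Pilot',
        # chapter fields: only for videos that belong to a logical section
        'chapter': 'Introduction',
        'chapter_number': 1,
        # series fields: only for videos that are episodes of a series
        'series': 'Some Show',
        'season_number': 2,
        'episode': 'Pilot',
        'episode_number': 1,
    }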
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
- raise ExtractorError('A network error has occured.', cause=e, expected=True)
+ raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
- raise ExtractorError('An extractor error has occured.', cause=e)
+ raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
return False
if errnote is None:
errnote = 'Unable to download webpage'
- errmsg = '%s: %s' % (errnote, compat_str(err))
+
+ errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
- if downloader_params.get('username', None) is not None:
+ if downloader_params.get('username') is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
return (username, password)
return None
downloader_params = self._downloader.params
- if downloader_params.get('twofactor', None) is not None:
+ if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
'mature': 17,
'restricted': 19,
}
- return RATING_TABLE.get(rating.lower(), None)
+ return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
'0': 18,
'false': 18,
}
- return RATING_TABLE.get(family_friendly.lower(), None)
+ return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
+ def _search_json_ld(self, html, video_id, **kwargs):
+ json_ld = self._search_regex(
+ r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
+ html, 'JSON-LD', group='json_ld', **kwargs)
+ if not json_ld:
+ return {}
+ return self._json_ld(json_ld, video_id, fatal=kwargs.get('fatal', True))
+
+ def _json_ld(self, json_ld, video_id, fatal=True):
+ if isinstance(json_ld, compat_str):
+ json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
+ if not json_ld:
+ return {}
+ info = {}
+ if json_ld.get('@context') == 'http://schema.org':
+ item_type = json_ld.get('@type')
+ if item_type == 'TVEpisode':
+ info.update({
+ 'episode': unescapeHTML(json_ld.get('name')),
+ 'episode_number': int_or_none(json_ld.get('episodeNumber')),
+ 'description': unescapeHTML(json_ld.get('description')),
+ })
+ part_of_season = json_ld.get('partOfSeason')
+ if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
+ info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
+ part_of_series = json_ld.get('partOfSeries')
+ if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
+ info['series'] = unescapeHTML(part_of_series.get('name'))
+ elif item_type == 'Article':
+ info.update({
+ 'timestamp': parse_iso8601(json_ld.get('datePublished')),
+ 'title': unescapeHTML(json_ld.get('headline')),
+ 'description': unescapeHTML(json_ld.get('articleBody')),
+ })
+ return dict((k, v) for k, v in info.items() if v is not None)
+
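For illustration, how _json_ld would map a typical schema.org TVEpisode block (hypothetical markup):

    json_ld = '''{
        "@context": "http://schema.org",
        "@type": "TVEpisode",
        "name": "Pilot",
        "episodeNumber": "1",
        "partOfSeason": {"@type": "TVSeason", "seasonNumber": "2"},
        "partOfSeries": {"@type": "TVSeries", "name": "Some Show"}
    }'''
    # _json_ld(json_ld, video_id) would return (None values are dropped):
    # {'episode': 'Pilot', 'episode_number': 1,
    #  'season_number': 2, 'series': 'Some Show'}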
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
if not formats:
raise ExtractorError('No video formats found')
+ for f in formats:
+ # Automatically determine tbr when missing based on abr and vbr (improves
+ # formats sorting in some cases)
+ if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
+ f['tbr'] = f['abr'] + f['vbr']
+
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
preference = f.get('preference')
if preference is None:
- proto = f.get('protocol')
- if proto is None:
- proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
-
- preference = 0 if proto in ['http', 'https'] else -0.1
+ preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
+ proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1
+
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
+ proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
+ @staticmethod
+ def _remove_duplicate_formats(formats):
+ format_urls = set()
+ unique_formats = []
+ for f in formats:
+ if f['url'] not in format_urls:
+ format_urls.add(f['url'])
+ unique_formats.append(f)
+ formats[:] = unique_formats
+
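Usage sketch for the new helper: de-duplication is by URL, in place, keeping the first occurrence (format dicts hypothetical):

    from youtube_dl.extractor.common import InfoExtractor

    formats = [
        {'url': 'http://example.com/a.mp4', 'format_id': 'sd'},
        {'url': 'http://example.com/a.mp4', 'format_id': 'sd-mirror'},
        {'url': 'http://example.com/b.mp4', 'format_id': 'hd'},
    ]
    InfoExtractor._remove_duplicate_formats(formats)
    # formats now holds only 'sd' and 'hd'; the duplicate a.mp4 entry is gone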
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
fatal=fatal)
if manifest is False:
- return manifest
+ return []
formats = []
manifest_version = '1.0'
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
- f4m_formats = self._extract_f4m_formats(
- manifest_url, video_id, preference, f4m_id, fatal=fatal)
- if f4m_formats:
- formats.extend(f4m_formats)
+ formats.extend(self._extract_f4m_formats(
+ manifest_url, video_id, preference, f4m_id, fatal=fatal))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
- return res
+ return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
+ # A Media Playlist Tag MUST NOT appear in a Master Playlist
+ # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
+ # The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlist
+ # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
+ if '#EXT-X-TARGETDURATION' in m3u8_doc:
+ return [{
+ 'url': m3u8_url,
+ 'format_id': m3u8_id,
+ 'ext': ext,
+ 'protocol': entry_protocol,
+ 'preference': preference,
+ }]
last_info = None
last_media = None
kv_rex = re.compile(
# TODO: it looks like the video codec does not necessarily always go first
va_codecs = codecs.split(',')
if va_codecs[0]:
- f['vcodec'] = va_codecs[0].partition('.')[0]
+ f['vcodec'] = va_codecs[0]
if len(va_codecs) > 1 and va_codecs[1]:
- f['acodec'] = va_codecs[1].partition('.')[0]
+ f['acodec'] = va_codecs[1]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
formats = []
rtmp_count = 0
http_count = 0
+ m3u8_count = 0
+ srcs = []
videos = smil.findall(self._xpath_ns('.//video', namespace))
for video in videos:
src = video.get('src')
- if not src:
+ if not src or src in srcs:
continue
+ srcs.append(src)
bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
filesize = int_or_none(video.get('size') or video.get('fileSize'))
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
+ src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ if len(m3u8_formats) == 1:
+ m3u8_count += 1
+ m3u8_formats[0].update({
+ 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
+ 'tbr': bitrate,
+ 'width': width,
+ 'height': height,
+ })
+ formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
- f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
- if f4m_formats:
- formats.extend(f4m_formats)
+ formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
+ urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
- if not src:
+ if not src or src in urls:
continue
- ext = textstream.get('ext') or determine_ext(src)
- if not ext:
- type_ = textstream.get('type')
- SUBTITLES_TYPES = {
- 'text/vtt': 'vtt',
- 'text/srt': 'srt',
- 'application/smptett+xml': 'tt',
- }
- if type_ in SUBTITLES_TYPES:
- ext = SUBTITLES_TYPES[type_]
+ urls.append(src)
+ ext = textstream.get('ext') or determine_ext(src) or mimetype2ext(textstream.get('type'))
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
})
return entries
+ def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
+ res = self._download_webpage_handle(
+ mpd_url, video_id,
+ note=note or 'Downloading MPD manifest',
+ errnote=errnote or 'Failed to download MPD manifest',
+ fatal=fatal)
+ if res is False:
+ return []
+ mpd, urlh = res
+ mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
+
+ return self._parse_mpd_formats(
+ compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
+
+ def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
+ if mpd_doc.get('type') == 'dynamic':
+ return []
+
+ namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
+
+ def _add_ns(path):
+ return self._xpath_ns(path, namespace)
+
+ def is_drm_protected(element):
+ return element.find(_add_ns('ContentProtection')) is not None
+
+ def extract_multisegment_info(element, ms_parent_info):
+ ms_info = ms_parent_info.copy()
+ segment_list = element.find(_add_ns('SegmentList'))
+ if segment_list is not None:
+ segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
+ if segment_urls_e:
+ ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
+ initialization = segment_list.find(_add_ns('Initialization'))
+ if initialization is not None:
+ ms_info['initialization_url'] = initialization.attrib['sourceURL']
+ else:
+ segment_template = element.find(_add_ns('SegmentTemplate'))
+ if segment_template is not None:
+ start_number = segment_template.get('startNumber')
+ if start_number:
+ ms_info['start_number'] = int(start_number)
+ segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
+ if segment_timeline is not None:
+ s_e = segment_timeline.findall(_add_ns('S'))
+ if s_e:
+ ms_info['total_number'] = 0
+ for s in s_e:
+ ms_info['total_number'] += 1 + int(s.get('r', '0'))
+ else:
+ timescale = segment_template.get('timescale')
+ if timescale:
+ ms_info['timescale'] = int(timescale)
+ segment_duration = segment_template.get('duration')
+ if segment_duration:
+ ms_info['segment_duration'] = int(segment_duration)
+ media_template = segment_template.get('media')
+ if media_template:
+ ms_info['media_template'] = media_template
+ initialization = segment_template.get('initialization')
+ if initialization:
+ ms_info['initialization_url'] = initialization
+ else:
+ initialization = segment_template.find(_add_ns('Initialization'))
+ if initialization is not None:
+ ms_info['initialization_url'] = initialization.attrib['sourceURL']
+ return ms_info
+
+ mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
+ formats = []
+ for period in mpd_doc.findall(_add_ns('Period')):
+ period_duration = parse_duration(period.get('duration')) or mpd_duration
+ period_ms_info = extract_multisegment_info(period, {
+ 'start_number': 1,
+ 'timescale': 1,
+ })
+ for adaptation_set in period.findall(_add_ns('AdaptationSet')):
+ if is_drm_protected(adaptation_set):
+ continue
+ adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
+ for representation in adaptation_set.findall(_add_ns('Representation')):
+ if is_drm_protected(representation):
+ continue
+ representation_attrib = adaptation_set.attrib.copy()
+ representation_attrib.update(representation.attrib)
+ mime_type = representation_attrib.get('mimeType')
+ content_type = mime_type.split('/')[0] if mime_type else representation_attrib.get('contentType')
+ if content_type == 'text':
+ # TODO implement WebVTT downloading
+ pass
+ elif content_type == 'video' or content_type == 'audio':
+ base_url = ''
+ for element in (representation, adaptation_set, period, mpd_doc):
+ base_url_e = element.find(_add_ns('BaseURL'))
+ if base_url_e is not None:
+ base_url = base_url_e.text + base_url
+ if re.match(r'^https?://', base_url):
+ break
+ if mpd_base_url and not re.match(r'^https?://', base_url):
+ if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
+ mpd_base_url += '/'
+ base_url = mpd_base_url + base_url
+ representation_id = representation_attrib.get('id')
+ lang = representation_attrib.get('lang')
+ url_el = representation.find(_add_ns('BaseURL'))
+ filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
+ f = {
+ 'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
+ 'url': base_url,
+ 'width': int_or_none(representation_attrib.get('width')),
+ 'height': int_or_none(representation_attrib.get('height')),
+ 'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
+ 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
+ 'fps': int_or_none(representation_attrib.get('frameRate')),
+ 'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
+ 'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
+ 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
+ 'format_note': 'DASH %s' % content_type,
+ 'filesize': filesize,
+ }
+ representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
+ if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
+ if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
+ segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
+ representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
+ media_template = representation_ms_info['media_template']
+ media_template = media_template.replace('$RepresentationID$', representation_id)
+ media_template = re.sub(r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
+ media_template = media_template.replace('$$', '$')
+ representation_ms_info['segment_urls'] = [media_template % {'Number': segment_number, 'Bandwidth': representation_attrib.get('bandwidth')} for segment_number in range(representation_ms_info['start_number'], representation_ms_info['total_number'] + representation_ms_info['start_number'])]
+ if 'segment_urls' in representation_ms_info:
+ f.update({
+ 'segment_urls': representation_ms_info['segment_urls'],
+ 'protocol': 'http_dash_segments',
+ })
+ if 'initialization_url' in representation_ms_info:
+ initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
+ f.update({
+ 'initialization_url': initialization_url,
+ })
+ if not f.get('url'):
+ f['url'] = initialization_url
+ try:
+ existing_format = next(
+ fo for fo in formats
+ if fo['format_id'] == representation_id)
+ except StopIteration:
+ full_info = formats_dict.get(representation_id, {}).copy()
+ full_info.update(f)
+ formats.append(full_info)
+ else:
+ existing_format.update(f)
+ else:
+ self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
+ self._sort_formats(formats)
+ return formats
+
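To make the SegmentTemplate expansion above concrete, a sketch of the media-template substitution (template and values hypothetical):

    import re

    media_template = '$RepresentationID$/seg-$Number%05d$.m4s'  # hypothetical
    media_template = media_template.replace('$RepresentationID$', 'video-1')
    media_template = re.sub(
        r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
    # -> 'video-1/seg-%(Number)05d.m4s'
    segment_urls = [media_template % {'Number': n, 'Bandwidth': None}
                    for n in range(1, 4)]
    # -> ['video-1/seg-00001.m4s', 'video-1/seg-00002.m4s', 'video-1/seg-00003.m4s']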
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
- now_str = now.strftime("%Y-%m-%d %H:%M")
+ now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
return {}
def _get_subtitles(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
return {}
def _get_automatic_captions(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
class SearchInfoExtractor(InfoExtractor):
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class CrackleIE(InfoExtractor):
+ _VALID_URL = r'(?:crackle:|https?://(?:www\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://www.crackle.com/the-art-of-more/2496419',
+ 'info_dict': {
+ 'id': '2496419',
+ 'ext': 'mp4',
+ 'title': 'Heavy Lies the Head',
+ 'description': 'md5:bb56aa0708fe7b9a4861535f15c3abca',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }
+
+ # extracted from http://legacyweb-us.crackle.com/flash/QueryReferrer.ashx
+ _SUBTITLE_SERVER = 'http://web-us-az.crackle.com'
+ _UPLYNK_OWNER_ID = 'e8773f7770a44dbd886eee4fca16a66b'
+ _THUMBNAIL_TEMPLATE = 'http://images-us-am.crackle.com/%stnl_1920x1080.jpg?ts=20140107233116?c=635333335057637614'
+
+ # extracted from http://legacyweb-us.crackle.com/flash/ReferrerRedirect.ashx
+ _MEDIA_FILE_SLOTS = {
+ 'c544.flv': {
+ 'width': 544,
+ 'height': 306,
+ },
+ '360p.mp4': {
+ 'width': 640,
+ 'height': 360,
+ },
+ '480p.mp4': {
+ 'width': 852,
+ 'height': 478,
+ },
+ '480p_1mbps.mp4': {
+ 'width': 852,
+ 'height': 478,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ item = self._download_xml(
+ 'http://legacyweb-us.crackle.com/app/revamp/vidwallcache.aspx?flags=-1&fm=%s' % video_id,
+ video_id).find('i')
+ title = item.attrib['t']
+
+ thumbnail = None
+ subtitles = {}
+ formats = self._extract_m3u8_formats(
+ 'http://content.uplynk.com/ext/%s/%s.m3u8' % (self._UPLYNK_OWNER_ID, video_id),
+ video_id, 'mp4', m3u8_id='hls', fatal=None)
+ path = item.attrib.get('p')
+ if path:
+ thumbnail = self._THUMBNAIL_TEMPLATE % path
+ http_base_url = 'http://ahttp.crackle.com/' + path
+ for mfs_path, mfs_info in self._MEDIA_FILE_SLOTS.items():
+ formats.append({
+ 'url': http_base_url + mfs_path,
+ 'format_id': 'http-' + mfs_path.split('.')[0],
+ 'width': mfs_info['width'],
+ 'height': mfs_info['height'],
+ })
+ for cc in item.findall('cc'):
+ locale = cc.attrib.get('l')
+ v = cc.attrib.get('v')
+ if locale and v:
+ subtitles[locale] = [{
+ 'url': '%s/%s%s_%s.xml' % (self._SUBTITLE_SERVER, path, locale, v),
+ 'ext': 'ttml',
+ }]
+ self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': item.attrib.get('d'),
+ 'duration': int(item.attrib.get('r'), 16) if item.attrib.get('r') else None,
+ 'series': item.attrib.get('sn'),
+ 'season_number': int_or_none(item.attrib.get('se')),
+ 'episode_number': int_or_none(item.attrib.get('ep')),
+ 'thumbnail': thumbnail,
+ 'subtitles': subtitles,
+ 'formats': formats,
+ }
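Note the explicit field order passed to _sort_formats here: with a field_preference tuple, sorting considers only those fields instead of the default heuristic that also weighs protocol, codec, and extension. Sketch with hypothetical entries:

    formats = [
        {'url': 'http://example.com/480p.mp4', 'width': 852, 'height': 478},
        {'url': 'http://example.com/360p.mp4', 'width': 640, 'height': 360},
    ]
    # self._sort_formats(formats, ('width', 'height', 'tbr', 'format_id'))
    # orders ascending by width, then height, so the 852x478 entry ends up
    # last, i.e. is treated as the best format.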
return assvalue
output = '[Script Info]\n'
- output += 'Title: %s\n' % sub_root.attrib["title"]
+ output += 'Title: %s\n' % sub_root.attrib['title']
output += 'ScriptType: v4.00+\n'
- output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
- output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
- output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+ output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
+ output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
+ output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
output += """ScaledBorderAndShadow: yes
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
- output += 'Style: ' + style.attrib["name"]
- output += ',' + style.attrib["font_name"]
- output += ',' + style.attrib["font_size"]
- output += ',' + style.attrib["primary_colour"]
- output += ',' + style.attrib["secondary_colour"]
- output += ',' + style.attrib["outline_colour"]
- output += ',' + style.attrib["back_colour"]
- output += ',' + ass_bool(style.attrib["bold"])
- output += ',' + ass_bool(style.attrib["italic"])
- output += ',' + ass_bool(style.attrib["underline"])
- output += ',' + ass_bool(style.attrib["strikeout"])
- output += ',' + style.attrib["scale_x"]
- output += ',' + style.attrib["scale_y"]
- output += ',' + style.attrib["spacing"]
- output += ',' + style.attrib["angle"]
- output += ',' + style.attrib["border_style"]
- output += ',' + style.attrib["outline"]
- output += ',' + style.attrib["shadow"]
- output += ',' + style.attrib["alignment"]
- output += ',' + style.attrib["margin_l"]
- output += ',' + style.attrib["margin_r"]
- output += ',' + style.attrib["margin_v"]
- output += ',' + style.attrib["encoding"]
+ output += 'Style: ' + style.attrib['name']
+ output += ',' + style.attrib['font_name']
+ output += ',' + style.attrib['font_size']
+ output += ',' + style.attrib['primary_colour']
+ output += ',' + style.attrib['secondary_colour']
+ output += ',' + style.attrib['outline_colour']
+ output += ',' + style.attrib['back_colour']
+ output += ',' + ass_bool(style.attrib['bold'])
+ output += ',' + ass_bool(style.attrib['italic'])
+ output += ',' + ass_bool(style.attrib['underline'])
+ output += ',' + ass_bool(style.attrib['strikeout'])
+ output += ',' + style.attrib['scale_x']
+ output += ',' + style.attrib['scale_y']
+ output += ',' + style.attrib['spacing']
+ output += ',' + style.attrib['angle']
+ output += ',' + style.attrib['border_style']
+ output += ',' + style.attrib['outline']
+ output += ',' + style.attrib['shadow']
+ output += ',' + style.attrib['alignment']
+ output += ',' + style.attrib['margin_l']
+ output += ',' + style.attrib['margin_r']
+ output += ',' + style.attrib['margin_v']
+ output += ',' + style.attrib['encoding']
output += '\n'
output += """
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
- output += ',' + event.attrib["start"]
- output += ',' + event.attrib["end"]
- output += ',' + event.attrib["style"]
- output += ',' + event.attrib["name"]
- output += ',' + event.attrib["margin_l"]
- output += ',' + event.attrib["margin_r"]
- output += ',' + event.attrib["margin_v"]
- output += ',' + event.attrib["effect"]
- output += ',' + event.attrib["text"]
+ output += ',' + event.attrib['start']
+ output += ',' + event.attrib['end']
+ output += ',' + event.attrib['style']
+ output += ',' + event.attrib['name']
+ output += ',' + event.attrib['margin_l']
+ output += ',' + event.attrib['margin_r']
+ output += ',' + event.attrib['margin_v']
+ output += ',' + event.attrib['effect']
+ output += ',' + event.attrib['text']
output += '\n'
return output
streamdata_req, video_id,
note='Downloading media info for %s' % video_format)
stream_info = streamdata.find('./{default}preload/stream_info')
- video_url = stream_info.find('./host').text
- video_play_path = stream_info.find('./file').text
+ video_url = xpath_text(stream_info, './host')
+ video_play_path = xpath_text(stream_info, './file')
+ if not video_url or not video_play_path:
+ continue
metadata = stream_info.find('./metadata')
format_info = {
'format': video_format,
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
- IE_NAME = "crunchyroll:playlist"
+ IE_NAME = 'crunchyroll:playlist'
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
find_xpath_attr,
smuggle_url,
determine_ext,
+ ExtractorError,
)
from .senateisvp import SenateISVPIE
IE_DESC = 'C-SPAN'
_TESTS = [{
'url': 'http://www.c-span.org/video/?313572-1/HolderonV',
- 'md5': '8e44ce11f0f725527daccc453f553eb0',
+ 'md5': '94b29a4f131ff03d23471dd6f60b6a1d',
'info_dict': {
'id': '315139',
'ext': 'mp4',
'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
- 'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
+ 'description': 'Attorney General Eric Holder speaks to reporters following the Supreme Court decision in [Shelby County v. Holder], in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced.',
},
'skip': 'Regularly fails on travis, for unknown reasons',
}, {
'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models',
- # For whatever reason, the served video alternates between
- # two different ones
+ 'md5': '8e5fbfabe6ad0f89f3012a7943c1287b',
'info_dict': {
- 'id': '340723',
+ 'id': 'c4486943',
'ext': 'mp4',
- 'title': 'International Health Care Models',
+ 'title': 'CSPAN - International Health Care Models',
'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
}
}, {
'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall',
- 'md5': '446562a736c6bf97118e389433ed88d4',
+ 'md5': '2ae5051559169baadba13fc35345ae74',
'info_dict': {
'id': '342759',
'ext': 'mp4',
'title': 'General Motors Ignition Switch Recall',
'duration': 14848,
- 'description': 'md5:70c7c3b8fa63fa60d42772440596034c'
+ 'description': 'md5:118081aedd24bf1d3b68b3803344e7f3'
},
}, {
# Video from senate.gov
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- page_id = mobj.group('id')
- webpage = self._download_webpage(url, page_id)
- video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id')
+ video_id = self._match_id(url)
+ video_type = None
+ webpage = self._download_webpage(url, video_id)
+ # We look for clipid first, since it takes precedence over clipprog when both are present
+ patterns = [r'id=\'clip(%s)\'\s*value=\'([0-9]+)\'' % t for t in ('id', 'prog')]
+ results = list(filter(None, (re.search(p, webpage) for p in patterns)))
+ if results:
+ matches = results[0]
+ video_type, video_id = matches.groups()
+ video_type = 'clip' if video_type == 'id' else 'program'
+ else:
+ m = re.search(r'data-(?P<type>clip|prog)id=["\'](?P<id>\d+)', webpage)
+ if m:
+ video_id = m.group('id')
+ video_type = 'program' if m.group('type') == 'prog' else 'clip'
+ else:
+ senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
+ if senate_isvp_url:
+ title = self._og_search_title(webpage)
+ surl = smuggle_url(senate_isvp_url, {'force_title': title})
+ return self.url_result(surl, 'SenateISVP', video_id, title)
+ if video_type is None or video_id is None:
+ raise ExtractorError('unable to find video id and type')
- description = self._html_search_regex(
- [
- # The full description
- r'<div class=\'expandable\'>(.*?)<a href=\'#\'',
- # If the description is small enough the other div is not
- # present, otherwise this is a stripped version
- r'<p class=\'initial\'>(.*?)</p>'
- ],
- webpage, 'description', flags=re.DOTALL, default=None)
+ def get_text_attr(d, attr):
+ return d.get(attr, {}).get('#text')
- info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
- data = self._download_json(info_url, video_id)
+ data = self._download_json(
+ 'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id),
+ video_id)['video']
+ if data['@status'] != 'Success':
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, get_text_attr(data, 'error')), expected=True)
doc = self._download_xml(
- 'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
+ 'http://www.c-span.org/common/services/flashXml.php?%sid=%s' % (video_type, video_id),
video_id)
+ description = self._html_search_meta('description', webpage)
+
title = find_xpath_attr(doc, './/string', 'name', 'title').text
thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text
- senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
- if senate_isvp_url:
- surl = smuggle_url(senate_isvp_url, {'force_title': title})
- return self.url_result(surl, 'SenateISVP', video_id, title)
-
- files = data['video']['files']
- try:
- capfile = data['video']['capfile']['#text']
- except KeyError:
- capfile = None
+ files = data['files']
+ capfile = get_text_attr(data, 'capfile')
- entries = [{
- 'id': '%s_%d' % (video_id, partnum + 1),
- 'title': (
- title if len(files) == 1 else
- '%s part %d' % (title, partnum + 1)),
- 'url': unescapeHTML(f['path']['#text']),
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': int_or_none(f.get('length', {}).get('#text')),
- 'subtitles': {
- 'en': [{
- 'url': capfile,
- 'ext': determine_ext(capfile, 'dfxp')
- }],
- } if capfile else None,
- } for partnum, f in enumerate(files)]
+ entries = []
+ for partnum, f in enumerate(files):
+ formats = []
+ for quality in f['qualities']:
+ formats.append({
+ 'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')),
+ 'url': unescapeHTML(get_text_attr(quality, 'file')),
+ 'height': int_or_none(get_text_attr(quality, 'height')),
+ 'tbr': int_or_none(get_text_attr(quality, 'bitrate')),
+ })
+ if not formats:
+ path = unescapeHTML(get_text_attr(f, 'path'))
+ if not path:
+ continue
+ formats = self._extract_m3u8_formats(
+ path, video_id, 'mp4', entry_protocol='m3u8_native',
+ m3u8_id='hls') if determine_ext(path) == 'm3u8' else [{'url': path, }]
+ self._sort_formats(formats)
+ entries.append({
+ 'id': '%s_%d' % (video_id, partnum + 1),
+ 'title': (
+ title if len(files) == 1 else
+ '%s part %d' % (title, partnum + 1)),
+ 'formats': formats,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': int_or_none(get_text_attr(f, 'length')),
+ 'subtitles': {
+ 'en': [{
+ 'url': capfile,
+ 'ext': determine_ext(capfile, 'dfxp')
+ }],
+ } if capfile else None,
+ })
if len(entries) == 1:
entry = dict(entries[0])
- entry['id'] = video_id
+ entry['id'] = 'c' + video_id if video_type == 'clip' else video_id
return entry
else:
return {
'_type': 'playlist',
'entries': entries,
'title': title,
- 'id': video_id,
+ 'id': 'c' + video_id if video_type == 'clip' else video_id,
}
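The ajax-player.php response wraps scalar values in {'#text': ...} nodes, which is what get_text_attr unwraps. A sketch of the assumed shape (hypothetical values):

    data = {
        '@status': 'Success',
        'files': [{
            'length': {'#text': '120'},
            'qualities': [{
                'bitrate': {'#text': '500'},
                'height': {'#text': '360'},
                'file': {'#text': 'http://example.com/clip-500k.mp4'},
            }],
        }],
    }

    def get_text_attr(d, attr):
        return d.get(attr, {}).get('#text')

    get_text_attr(data['files'][0]['qualities'][0], 'height')  # -> '360'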
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class CultureUnpluggedIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?cultureunplugged\.com/documentary/watch-online/play/(?P<id>\d+)(?:/(?P<display_id>[^/]+))?'
+ _TESTS = [{
+ 'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662/The-Next--Best-West',
+ 'md5': 'ac6c093b089f7d05e79934dcb3d228fc',
+ 'info_dict': {
+ 'id': '53662',
+ 'display_id': 'The-Next--Best-West',
+ 'ext': 'mp4',
+ 'title': 'The Next, Best West',
+ 'description': 'md5:0423cd00833dea1519cf014e9d0903b1',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'creator': 'Coldstream Creative',
+ 'duration': 2203,
+ 'view_count': int,
+ }
+ }, {
+ 'url': 'http://www.cultureunplugged.com/documentary/watch-online/play/53662',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id') or video_id
+
+ movie_data = self._download_json(
+ 'http://www.cultureunplugged.com/movie-data/cu-%s.json' % video_id, display_id)
+
+ video_url = movie_data['url']
+ title = movie_data['title']
+
+ description = movie_data.get('synopsis')
+ creator = movie_data.get('producer')
+ duration = int_or_none(movie_data.get('duration'))
+ view_count = int_or_none(movie_data.get('views'))
+
+ thumbnails = [{
+ 'url': movie_data['%s_thumb' % size],
+ 'id': size,
+ 'preference': preference,
+ } for preference, size in enumerate((
+ 'small', 'large')) if movie_data.get('%s_thumb' % size)]
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': video_url,
+ 'title': title,
+ 'description': description,
+ 'creator': creator,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'thumbnails': thumbnails,
+ }
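The thumbnails list above derives preference from enumeration order, so 'large' gets the higher value; a sketch with hypothetical data:

    movie_data = {  # hypothetical
        'small_thumb': 'http://example.com/small.jpg',
        'large_thumb': 'http://example.com/large.jpg',
    }
    # -> [{'url': '.../small.jpg', 'id': 'small', 'preference': 0},
    #     {'url': '.../large.jpg', 'id': 'large', 'preference': 1}]
    # Higher preference wins, so the large thumbnail is picked as the default.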
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class CWTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?cw(?:tv|seed)\.com/shows/(?:[^/]+/){2}\?play=(?P<id>[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12})'
+ _TESTS = [{
+ 'url': 'http://cwtv.com/shows/arrow/legends-of-yesterday/?play=6b15e985-9345-4f60-baf8-56e96be57c63',
+ 'info_dict': {
+ 'id': '6b15e985-9345-4f60-baf8-56e96be57c63',
+ 'ext': 'mp4',
+ 'title': 'Legends of Yesterday',
+ 'description': 'Oliver and Barry Allen take Kendra Saunders and Carter Hall to a remote location to keep them hidden from Vandal Savage while they figure out how to defeat him.',
+ 'duration': 2665,
+ 'series': 'Arrow',
+ 'season_number': 4,
+ 'season': '4',
+ 'episode_number': 8,
+ 'upload_date': '20151203',
+ 'timestamp': 1449122100,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.cwseed.com/shows/whose-line-is-it-anyway/jeff-davis-4/?play=24282b12-ead2-42f2-95ad-26770c2c6088',
+ 'info_dict': {
+ 'id': '24282b12-ead2-42f2-95ad-26770c2c6088',
+ 'ext': 'mp4',
+ 'title': 'Jeff Davis 4',
+ 'description': 'Jeff Davis is back to make you laugh.',
+ 'duration': 1263,
+ 'series': 'Whose Line Is It Anyway?',
+ 'season_number': 11,
+ 'season': '11',
+ 'episode_number': 20,
+ 'upload_date': '20151006',
+ 'timestamp': 1444107300,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ video_data = self._download_json(
+ 'http://metaframe.digitalsmiths.tv/v2/CWtv/assets/%s/partner/132?format=json' % video_id, video_id)
+
+ formats = self._extract_m3u8_formats(
+ video_data['videos']['variantplaylist']['uri'], video_id, 'mp4')
+
+ thumbnails = [{
+ 'url': image['uri'],
+ 'width': image.get('width'),
+ 'height': image.get('height'),
+ } for image_id, image in video_data['images'].items() if image.get('uri')] if video_data.get('images') else None
+
+ video_metadata = video_data['assetFields']
+
+ subtitles = {
+ 'en': [{
+ 'url': video_metadata['UnicornCcUrl'],
+ }],
+ } if video_metadata.get('UnicornCcUrl') else None
+
+ return {
+ 'id': video_id,
+ 'title': video_metadata['title'],
+ 'description': video_metadata.get('description'),
+ 'duration': int_or_none(video_metadata.get('duration')),
+ 'series': video_metadata.get('seriesName'),
+ 'season_number': int_or_none(video_metadata.get('seasonNumber')),
+ 'season': video_metadata.get('seasonName'),
+ 'episode_number': int_or_none(video_metadata.get('episodeNumber')),
+ 'timestamp': parse_iso8601(video_data.get('startTime')),
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
from .common import InfoExtractor
-from ..compat import compat_str
from ..utils import (
- ExtractorError,
determine_ext,
+ error_to_compat_str,
+ ExtractorError,
int_or_none,
parse_iso8601,
sanitized_Request,
class DailymotionIE(DailymotionBaseInfoExtractor):
- _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
+ _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(?:embed|swf|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
{
'url': 'http://www.dailymotion.com/video/xhza0o',
'only_matching': True,
+ },
+ # with subtitles
+ {
+ 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://www.dailymotion.com/swf/video/x3n92nf',
+ 'only_matching': True,
}
]
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')
- view_count = str_to_int(self._search_regex(
- [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
- r'video_views_count[^>]+>\s+([\d\.,]+)'],
- webpage, 'view count', fatal=False))
+ view_count_str = self._search_regex(
+ (r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
+ r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
+ webpage, 'view count', fatal=False)
+ if view_count_str:
+ view_count_str = re.sub(r'\s', '', view_count_str)
+ view_count = str_to_int(view_count_str)
comment_count = int_or_none(self._search_regex(
r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
webpage, 'comment count', fatal=False))
player_v5 = self._search_regex(
- [r'buildPlayer\(({.+?})\);', r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);'],
+ [r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
+ r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
+ r'buildPlayer\(({.+?})\);'],
webpage, 'player v5', default=None)
if player_v5:
player = self._parse_json(player_v5, video_id)
continue
ext = determine_ext(media_url)
if type_ == 'application/x-mpegURL' or ext == 'm3u8':
- m3u8_formats = self._extract_m3u8_formats(
- media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', preference=-1,
+ m3u8_id='hls', fatal=False))
elif type_ == 'application/f4m' or ext == 'f4m':
- f4m_formats = self._extract_f4m_formats(
- media_url, video_id, preference=-1, f4m_id='hds', fatal=False)
- if f4m_formats:
- formats.extend(f4m_formats)
+ formats.extend(self._extract_f4m_formats(
+ media_url, video_id, preference=-1, f4m_id='hds', fatal=False))
else:
f = {
'url': media_url,
- 'format_id': quality,
+ 'format_id': 'http-%s' % quality,
}
m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
if m:
uploader_id = metadata.get('owner', {}).get('id')
subtitles = {}
- for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
- subtitles[subtitle_lang] = [{
- 'ext': determine_ext(subtitle_url),
- 'url': subtitle_url,
- } for subtitle_url in subtitle.get('urls', [])]
+ subtitles_data = metadata.get('subtitles', {}).get('data', {})
+ if subtitles_data and isinstance(subtitles_data, dict):
+ for subtitle_lang, subtitle in subtitles_data.items():
+ subtitles[subtitle_lang] = [{
+ 'ext': determine_ext(subtitle_url),
+ 'url': subtitle_url,
+ } for subtitle_url in subtitle.get('urls', [])]
return {
'id': video_id,
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
- _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
+ _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
}]
@classmethod
- def _extract_dmcloud_url(self, webpage):
- mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage)
+ def _extract_dmcloud_url(cls, webpage):
+ mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
if mobj:
return mobj.group(1)
mobj = re.search(
- r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL,
+ r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
webpage)
if mobj:
return mobj.group(1)
from __future__ import unicode_literals
import re
+import itertools
from .common import InfoExtractor
from ..compat import (
+ compat_parse_qs,
compat_urllib_parse,
+ compat_urllib_parse_unquote,
+ compat_urlparse,
+)
+from ..utils import (
+ int_or_none,
+ str_to_int,
+ xpath_text,
+ unescapeHTML,
)
class DaumIE(InfoExtractor):
- _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
+ _VALID_URL = r'https?://(?:(?:m\.)?tvpot\.daum\.net/v/|videofarm\.daum\.net/controller/player/VodPlayer\.swf\?vid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
_TESTS = [{
- 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
+ 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
'info_dict': {
- 'id': '52554690',
+ 'id': 'vab4dyeDBysyBssyukBUjBz',
'ext': 'mp4',
- 'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
- 'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
- 'upload_date': '20130831',
- 'duration': 3868,
+ 'title': '마크 헌트 vs 안토니오 실바',
+ 'description': 'Mark Hunt vs Antonio Silva',
+ 'upload_date': '20131217',
+ 'thumbnail': 're:^https?://.*\.(?:jpg|png)',
+ 'duration': 2117,
+ 'view_count': int,
+ 'comment_count': int,
},
}, {
- 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
- 'only_matching': True,
+ 'url': 'http://m.tvpot.daum.net/v/65139429',
+ 'info_dict': {
+ 'id': '65139429',
+ 'ext': 'mp4',
+ 'title': '1297회, \'아빠 아들로 태어나길 잘 했어\' 민수, 감동의 눈물[아빠 어디가] 20150118',
+ 'description': 'md5:79794514261164ff27e36a21ad229fc5',
+ 'upload_date': '20150604',
+ 'thumbnail': 're:^https?://.*\.(?:jpg|png)',
+ 'duration': 154,
+ 'view_count': int,
+ 'comment_count': int,
+ },
}, {
'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
'only_matching': True,
+ }, {
+ 'url': 'http://videofarm.daum.net/controller/player/VodPlayer.swf?vid=vwIpVpCQsT8%24&ref=',
+ 'info_dict': {
+ 'id': 'vwIpVpCQsT8$',
+ 'ext': 'flv',
+ 'title': '01-Korean War ( Trouble on the horizon )',
+ 'description': '\nKorean War 01\nTrouble on the horizon\n전쟁의 먹구름',
+ 'upload_date': '20080223',
+ 'thumbnail': 're:^https?://.*\.(?:jpg|png)',
+ 'duration': 249,
+ 'view_count': int,
+ 'comment_count': int,
+ },
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
- webpage = self._download_webpage(canonical_url, video_id)
- full_id = self._search_regex(
- r'src=["\']http://videofarm\.daum\.net/controller/video/viewer/Video\.html\?.*?vid=(.+?)[&"\']',
- webpage, 'full id')
- query = compat_urllib_parse.urlencode({'vid': full_id})
+ video_id = compat_urllib_parse_unquote(self._match_id(url))
+ query = compat_urllib_parse.urlencode({'vid': video_id})
+ movie_data = self._download_json(
+ 'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query,
+ video_id, 'Downloading video formats info')
+
+ # For URLs like http://m.tvpot.daum.net/v/65139429, where the video_id is really a clipid
+ if not movie_data.get('output_list', {}).get('output_list') and re.match(r'^\d+$', video_id):
+ return self.url_result('http://tvpot.daum.net/clip/ClipView.do?clipid=%s' % video_id)
+
info = self._download_xml(
'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
'Downloading video info')
- urls = self._download_xml(
- 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
- video_id, 'Downloading video formats info')
formats = []
- for format_el in urls.findall('result/output_list/output_list'):
- profile = format_el.attrib['profile']
+ for format_el in movie_data['output_list']['output_list']:
+ profile = format_el['profile']
format_query = compat_urllib_parse.urlencode({
- 'vid': full_id,
+ 'vid': video_id,
'profile': profile,
})
url_doc = self._download_xml(
formats.append({
'url': format_url,
'format_id': profile,
+ 'width': int_or_none(format_el.get('width')),
+ 'height': int_or_none(format_el.get('height')),
+ 'filesize': int_or_none(format_el.get('filesize')),
})
+ self._sort_formats(formats)
return {
'id': video_id,
'title': info.find('TITLE').text,
'formats': formats,
- 'thumbnail': self._og_search_thumbnail(webpage),
- 'description': info.find('CONTENTS').text,
- 'duration': int(info.find('DURATION').text),
+ 'thumbnail': xpath_text(info, 'THUMB_URL'),
+ 'description': xpath_text(info, 'CONTENTS'),
+ 'duration': int_or_none(xpath_text(info, 'DURATION')),
'upload_date': info.find('REGDTTM').text[:8],
+ 'view_count': str_to_int(xpath_text(info, 'PLAY_CNT')),
+ 'comment_count': str_to_int(xpath_text(info, 'COMMENT_CNT')),
}
+
+
+class DaumClipIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.(?:do|tv)|mypot/View.do)\?.*?clipid=(?P<id>\d+)'
+ IE_NAME = 'daum.net:clip'
+ _URL_TEMPLATE = 'http://tvpot.daum.net/clip/ClipView.do?clipid=%s'
+
+ _TESTS = [{
+ 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
+ 'info_dict': {
+ 'id': '52554690',
+ 'ext': 'mp4',
+ 'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
+ 'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
+ 'upload_date': '20130831',
+ 'thumbnail': 're:^https?://.*\.(?:jpg|png)',
+ 'duration': 3868,
+ 'view_count': int,
+ },
+ }, {
+ 'url': 'http://m.tvpot.daum.net/clip/ClipView.tv?clipid=54999425',
+ 'only_matching': True,
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if DaumPlaylistIE.suitable(url) or DaumUserIE.suitable(url) else super(DaumClipIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ clip_info = self._download_json(
+ 'http://tvpot.daum.net/mypot/json/GetClipInfo.do?clipid=%s' % video_id,
+ video_id, 'Downloading clip info')['clip_bean']
+
+ return {
+ '_type': 'url_transparent',
+ 'id': video_id,
+ 'url': 'http://tvpot.daum.net/v/%s' % clip_info['vid'],
+ 'title': unescapeHTML(clip_info['title']),
+ 'thumbnail': clip_info.get('thumb_url'),
+ 'description': clip_info.get('contents'),
+ 'duration': int_or_none(clip_info.get('duration')),
+ 'upload_date': (clip_info.get('up_date') or '')[:8] or None,
+ 'view_count': int_or_none(clip_info.get('play_count')),
+ 'ie_key': 'Daum',
+ }
+
+
+class DaumListIE(InfoExtractor):
+ def _get_entries(self, list_id, list_id_type):
+ name = None
+ entries = []
+ for pagenum in itertools.count(1):
+ list_info = self._download_json(
+ 'http://tvpot.daum.net/mypot/json/GetClipInfo.do?size=48&init=true&order=date&page=%d&%s=%s' % (
+ pagenum, list_id_type, list_id), list_id, 'Downloading list info - %s' % pagenum)
+
+ entries.extend([
+ self.url_result(
+ 'http://tvpot.daum.net/v/%s' % clip['vid'])
+ for clip in list_info['clip_list']
+ ])
+
+ if not name:
+ name = list_info.get('playlist_bean', {}).get('name') or \
+ list_info.get('potInfo', {}).get('name')
+
+ if not list_info.get('has_more'):
+ break
+
+ return name, entries
+
+ def _check_clip(self, url, list_id):
+ query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+ if 'clipid' in query_dict:
+ clip_id = query_dict['clipid'][0]
+ if self._downloader.params.get('noplaylist'):
+ self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
+ return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
+ else:
+ self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % list_id)
+
+
+class DaumPlaylistIE(DaumListIE):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View\.do|Top\.tv)\?.*?playlistid=(?P<id>[0-9]+)'
+ IE_NAME = 'daum.net:playlist'
+ _URL_TEMPLATE = 'http://tvpot.daum.net/mypot/View.do?playlistid=%s'
+
+ _TESTS = [{
+ 'note': 'Playlist url with clipid',
+ 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
+ 'info_dict': {
+ 'id': '6213966',
+ 'title': 'Woorissica Official',
+ },
+ 'playlist_mincount': 181
+ }, {
+ 'note': 'Playlist url with clipid - noplaylist',
+ 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
+ 'info_dict': {
+ 'id': '73806844',
+ 'ext': 'mp4',
+ 'title': '151017 Airport',
+ 'upload_date': '20160117',
+ },
+ 'params': {
+ 'noplaylist': True,
+ 'skip_download': True,
+ }
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if DaumUserIE.suitable(url) else super(DaumPlaylistIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+
+ clip_result = self._check_clip(url, list_id)
+ if clip_result:
+ return clip_result
+
+ name, entries = self._get_entries(list_id, 'playlistid')
+
+ return self.playlist_result(entries, list_id, name)
+
+
+class DaumUserIE(DaumListIE):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View|Top)\.(?:do|tv)\?.*?ownerid=(?P<id>[0-9a-zA-Z]+)'
+ IE_NAME = 'daum.net:user'
+
+ _TESTS = [{
+ 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0',
+ 'info_dict': {
+ 'id': 'o2scDLIVbHc0',
+ 'title': '마이 리틀 텔레비전',
+ },
+ 'playlist_mincount': 213
+ }, {
+ 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&clipid=73801156',
+ 'info_dict': {
+ 'id': '73801156',
+ 'ext': 'mp4',
+ 'title': '[미공개] 김구라, 오만석이 부릅니다 \'오케피\' - 마이 리틀 텔레비전 20160116',
+ 'upload_date': '20160117',
+ 'description': 'md5:5e91d2d6747f53575badd24bd62b9f36'
+ },
+ 'params': {
+ 'noplaylist': True,
+ 'skip_download': True,
+ }
+ }, {
+ 'note': 'Playlist url has ownerid and playlistid, playlistid takes precedence',
+ 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&playlistid=6196631',
+ 'info_dict': {
+ 'id': '6196631',
+ 'title': '마이 리틀 텔레비전 - 20160109',
+ },
+ 'playlist_count': 11
+ }, {
+ 'url': 'http://tvpot.daum.net/mypot/Top.do?ownerid=o2scDLIVbHc0',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://m.tvpot.daum.net/mypot/Top.tv?ownerid=45x1okb1If50&playlistid=3569733',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+
+ clip_result = self._check_clip(url, list_id)
+ if clip_result:
+ return clip_result
+
+ query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+ if 'playlistid' in query_dict:
+ playlist_id = query_dict['playlistid'][0]
+ return self.url_result(DaumPlaylistIE._URL_TEMPLATE % playlist_id, 'DaumPlaylist')
+
+ name, entries = self._get_entries(list_id, 'ownerid')
+
+ return self.playlist_result(entries, list_id, name)
class DBTVIE(InfoExtractor):
- _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?dbtv\.no/(?:(?:lazyplayer|player)/)?(?P<id>[0-9]+)(?:#(?P<display_id>.+))?'
+ _TESTS = [{
'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
'info_dict': {
'view_count': int,
'categories': list,
}
- }
+ }, {
+ 'url': 'http://dbtv.no/3649835190001',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.dbtv.no/lazyplayer/4631135248001',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- display_id = mobj.group('display_id')
+ display_id = mobj.group('display_id') or video_id
data = self._download_json(
'http://api.dbtv.no/discovery/%s' % video_id, display_id)
# coding: utf-8
from __future__ import unicode_literals
+import re
+import base64
+
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import (
+ compat_urllib_parse,
+ compat_str,
+)
from ..utils import (
int_or_none,
parse_iso8601,
sanitized_Request,
+ smuggle_url,
+ unsmuggle_url,
)
class DCNIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<video_id>\d+)/(?P<season_id>\d+))?'
+
+ def _real_extract(self, url):
+ show_id, video_id, season_id = re.match(self._VALID_URL, url).groups()
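+        # dispatch to the most specific extractor: video, then season, then whole show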
+ if video_id and int(video_id) > 0:
+ return self.url_result(
+ 'http://www.dcndigital.ae/media/%s' % video_id, 'DCNVideo')
+ elif season_id and int(season_id) > 0:
+ return self.url_result(smuggle_url(
+ 'http://www.dcndigital.ae/program/season/%s' % season_id,
+ {'show_id': show_id}), 'DCNSeason')
+ else:
+ return self.url_result(
+ 'http://www.dcndigital.ae/program/%s' % show_id, 'DCNSeason')
+
+
+class DCNBaseIE(InfoExtractor):
+ def _extract_video_info(self, video_data, video_id, is_live):
+ title = video_data.get('title_en') or video_data['title_ar']
+ img = video_data.get('img')
+ thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
+ duration = int_or_none(video_data.get('duration'))
+ description = video_data.get('description_en') or video_data.get('description_ar')
+ timestamp = parse_iso8601(video_data.get('create_time'), ' ')
+
+ return {
+ 'id': video_id,
+ 'title': self._live_title(title) if is_live else title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'is_live': is_live,
+ }
+
+ def _extract_video_formats(self, webpage, video_id, entry_protocol):
+ formats = []
+ m3u8_url = self._html_search_regex(
+ r'file\s*:\s*"([^"]+)', webpage, 'm3u8 url', fatal=False)
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False))
+
+ rtsp_url = self._search_regex(
+ r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
+ if rtsp_url:
+ formats.append({
+ 'url': rtsp_url,
+ 'format_id': 'rtsp',
+ })
+
+ self._sort_formats(formats)
+ return formats
+
+
+class DCNVideoIE(DCNBaseIE):
+ IE_NAME = 'dcn:video'
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/[^/]+|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)'
_TEST = {
- 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887',
+ 'url': 'http://www.dcndigital.ae/#/video/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375',
'info_dict':
{
'id': '17375',
'ext': 'mp4',
'title': 'رحلة العمر : الحلقة 1',
'description': 'md5:0156e935d870acb8ef0a66d24070c6d6',
- 'thumbnail': 're:^https?://.*\.jpg$',
'duration': 2041,
'timestamp': 1227504126,
'upload_date': '20081124',
request = sanitized_Request(
'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
headers={'Origin': 'http://www.dcndigital.ae'})
-
- video = self._download_json(request, video_id)
- title = video.get('title_en') or video['title_ar']
+ video_data = self._download_json(request, video_id)
+ info = self._extract_video_info(video_data, video_id, False)
webpage = self._download_webpage(
'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
compat_urllib_parse.urlencode({
- 'id': video['id'],
- 'user_id': video['user_id'],
- 'signature': video['signature'],
+ 'id': video_data['id'],
+ 'user_id': video_data['user_id'],
+ 'signature': video_data['signature'],
'countries': 'Q0M=',
'filter': 'DENY',
}), video_id)
+ info['formats'] = self._extract_video_formats(webpage, video_id, 'm3u8_native')
+ return info
- m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url')
- formats = self._extract_m3u8_formats(
- m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
- rtsp_url = self._search_regex(
- r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
- if rtsp_url:
- formats.append({
- 'url': rtsp_url,
- 'format_id': 'rtsp',
+class DCNLiveIE(DCNBaseIE):
+ IE_NAME = 'dcn:live'
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?live/(?P<id>\d+)'
+
+ def _real_extract(self, url):
+ channel_id = self._match_id(url)
+
+ request = sanitized_Request(
+ 'http://admin.mangomolo.com/analytics/index.php/plus/getchanneldetails?channel_id=%s' % channel_id,
+ headers={'Origin': 'http://www.dcndigital.ae'})
+
+ channel_data = self._download_json(request, channel_id)
+ info = self._extract_video_info(channel_data, channel_id, True)
+
+ webpage = self._download_webpage(
+ 'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' +
+ compat_urllib_parse.urlencode({
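+                # the live embed endpoint expects base64-encoded ids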
+ 'id': base64.b64encode(channel_data['user_id'].encode()).decode(),
+ 'channelid': base64.b64encode(channel_data['id'].encode()).decode(),
+ 'signature': channel_data['signature'],
+ 'countries': 'Q0M=',
+ 'filter': 'DENY',
+ }), channel_id)
+ info['formats'] = self._extract_video_formats(webpage, channel_id, 'm3u8')
+ return info
+
+
+class DCNSeasonIE(InfoExtractor):
+ IE_NAME = 'dcn:season'
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))'
+ _TEST = {
+ 'url': 'http://dcndigital.ae/#/program/205024/%D9%85%D8%AD%D8%A7%D8%B6%D8%B1%D8%A7%D8%AA-%D8%A7%D9%84%D8%B4%D9%8A%D8%AE-%D8%A7%D9%84%D8%B4%D8%B9%D8%B1%D8%A7%D9%88%D9%8A',
+ 'info_dict':
+ {
+ 'id': '7910',
+ 'title': 'محاضرات الشيخ الشعراوي',
+ },
+ 'playlist_mincount': 27,
+ }
+
+ def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
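+        # DCNIE smuggles the show_id in when linking a specific season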
+ show_id, season_id = re.match(self._VALID_URL, url).groups()
+
+ data = {}
+ if season_id:
+ data['season'] = season_id
+ show_id = smuggled_data.get('show_id')
+ if show_id is None:
+ request = sanitized_Request(
+ 'http://admin.mangomolo.com/analytics/index.php/plus/season_info?id=%s' % season_id,
+ headers={'Origin': 'http://www.dcndigital.ae'})
+ season = self._download_json(request, season_id)
+ show_id = season['id']
+ data['show_id'] = show_id
+ request = sanitized_Request(
+ 'http://admin.mangomolo.com/analytics/index.php/plus/show',
+ compat_urllib_parse.urlencode(data),
+ {
+ 'Origin': 'http://www.dcndigital.ae',
+ 'Content-Type': 'application/x-www-form-urlencoded'
})
- self._sort_formats(formats)
+ show = self._download_json(request, show_id)
+ if not season_id:
+ season_id = show['default_season']
+ for season in show['seasons']:
+ if season['id'] == season_id:
+ title = season.get('title_en') or season['title_ar']
- img = video.get('img')
- thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
- duration = int_or_none(video.get('duration'))
- description = video.get('description_en') or video.get('description_ar')
- timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ')
+ entries = []
+ for video in show['videos']:
+ video_id = compat_str(video['id'])
+ entries.append(self.url_result(
+ 'http://www.dcndigital.ae/media/%s' % video_id, 'DCNVideo', video_id))
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'timestamp': timestamp,
- 'formats': formats,
- }
+ return self.playlist_result(entries, season_id, title)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class DigitekaIE(InfoExtractor):
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?(?:digiteka\.net|ultimedia\.com)/
+ (?:
+ deliver/
+ (?P<embed_type>
+ generic|
+ musique
+ )
+ (?:/[^/]+)*/
+ (?:
+ src|
+ article
+ )|
+ default/index/video
+ (?P<site_type>
+ generic|
+ music
+ )
+ /id
+ )/(?P<id>[\d+a-z]+)'''
+ _TESTS = [{
+ # news
+ 'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
+ 'md5': '276a0e49de58c7e85d32b057837952a2',
+ 'info_dict': {
+ 'id': 's8uk0r',
+ 'ext': 'mp4',
+ 'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 74,
+ 'upload_date': '20150317',
+ 'timestamp': 1426604939,
+ 'uploader_id': '3fszv',
+ },
+ }, {
+ # music
+ 'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
+ 'md5': '2ea3513813cf230605c7e2ffe7eca61c',
+ 'info_dict': {
+ 'id': 'xvpfp8',
+ 'ext': 'mp4',
+ 'title': 'Two - C\'est La Vie (clip)',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 233,
+ 'upload_date': '20150224',
+ 'timestamp': 1424760500,
+ 'uploader_id': '3rfzk',
+ },
+ }, {
+ 'url': 'https://www.digiteka.net/deliver/generic/iframe/mdtk/01637594/src/lqm3kl/zone/1/showtitle/1/autoplay/yes',
+ 'only_matching': True,
+ }]
+
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<(?:iframe|script)[^>]+src=["\'](?P<url>(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)',
+ webpage)
+ if mobj:
+ return mobj.group('url')
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ video_type = mobj.group('embed_type') or mobj.group('site_type')
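+        # the deliver API expects the French term 'musique' for music videos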
+ if video_type == 'music':
+ video_type = 'musique'
+
+ deliver_info = self._download_json(
+ 'http://www.ultimedia.com/deliver/video?video=%s&topic=%s' % (video_id, video_type),
+ video_id)
+
+ yt_id = deliver_info.get('yt_id')
+ if yt_id:
+ return self.url_result(yt_id, 'Youtube')
+
+ jwconf = deliver_info['jwconf']
+
+ formats = []
+ for source in jwconf['playlist'][0]['sources']:
+ formats.append({
+ 'url': source['file'],
+ 'format_id': source.get('label'),
+ })
+
+ self._sort_formats(formats)
+
+ title = deliver_info['title']
+ thumbnail = jwconf.get('image')
+ duration = int_or_none(deliver_info.get('duration'))
+ timestamp = int_or_none(deliver_info.get('release_time'))
+ uploader_id = deliver_info.get('owner_id')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'uploader_id': uploader_id,
+ 'formats': formats,
+ }
class DiscoveryIE(InfoExtractor):
- _VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9_\-]*)(?:\.htm)?'
+ _VALID_URL = r'''(?x)http://(?:www\.)?(?:
+ discovery|
+ investigationdiscovery|
+ discoverylife|
+ animalplanet|
+ ahctv|
+ destinationamerica|
+ sciencechannel|
+ tlc|
+ velocity
+ )\.com/(?:[^/]+/)*(?P<id>[^./?#]+)'''
_TESTS = [{
'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
'info_dict': {
'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
' back.'),
'duration': 156,
- 'timestamp': 1303099200,
- 'upload_date': '20110418',
+ 'timestamp': 1302032462,
+ 'upload_date': '20110405',
},
'params': {
'skip_download': True, # requires ffmpeg
'id': 'mythbusters-the-simpsons',
'title': 'MythBusters: The Simpsons',
},
- 'playlist_count': 9,
+ 'playlist_mincount': 10,
+ }, {
+ 'url': 'http://www.animalplanet.com/longfin-eels-maneaters/',
+ 'info_dict': {
+ 'id': '78326',
+ 'ext': 'mp4',
+ 'title': 'Longfin Eels: Maneaters?',
+ 'description': 'Jeremy Wade tests whether or not New Zealand\'s longfin eels are man-eaters by covering himself in fish guts and getting in the water with them.',
+ 'upload_date': '20140725',
+ 'timestamp': 1406246400,
+ 'duration': 116,
+ },
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- info = self._download_json(url + '?flat=1', video_id)
+ display_id = self._match_id(url)
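+        # with flat=1 appended the page returns its playlist metadata as JSON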
+ info = self._download_json(url + '?flat=1', display_id)
video_title = info.get('playlist_title') or info.get('video_title')
entries = [{
'id': compat_str(video_info['id']),
'formats': self._extract_m3u8_formats(
- video_info['src'], video_id, ext='mp4',
+ video_info['src'], display_id, 'mp4', 'm3u8_native', m3u8_id='hls',
note='Downloading m3u8 information for video %d' % (idx + 1)),
'title': video_info['title'],
'description': video_info.get('description'),
'duration': parse_duration(video_info.get('video_length')),
- 'webpage_url': video_info.get('href'),
+ 'webpage_url': video_info.get('href') or video_info.get('url'),
'thumbnail': video_info.get('thumbnailURL'),
'alt_title': video_info.get('secondary_title'),
'timestamp': parse_iso8601(video_info.get('publishedDate')),
} for idx, video_info in enumerate(info['playlist'])]
- return self.playlist_result(entries, video_id, video_title)
+ return self.playlist_result(entries, display_id, video_title)
import itertools
-from .common import InfoExtractor
+from .amp import AMPIE
from ..compat import (
compat_HTTPError,
compat_urllib_parse,
from ..utils import (
ExtractorError,
clean_html,
- determine_ext,
int_or_none,
- parse_iso8601,
sanitized_Request,
)
-class DramaFeverBaseIE(InfoExtractor):
+class DramaFeverBaseIE(AMPIE):
_LOGIN_URL = 'https://www.dramafever.com/accounts/login/'
_NETRC_MACHINE = 'dramafever'
class DramaFeverIE(DramaFeverBaseIE):
IE_NAME = 'dramafever'
_VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
'info_dict': {
'id': '4512.1',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Cooking with Shin 4512.1',
'description': 'md5:a8eec7942e1664a6896fcd5e1287bfd0',
+ 'episode': 'Episode 1',
+ 'episode_number': 1,
'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1404336058,
'upload_date': '20140702',
'duration': 343,
- }
- }
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.dramafever.com/drama/4826/4/Mnet_Asian_Music_Awards_2015/?ap=1',
+ 'info_dict': {
+ 'id': '4826.4',
+ 'ext': 'mp4',
+ 'title': 'Mnet Asian Music Awards 2015 4826.4',
+ 'description': 'md5:3ff2ee8fedaef86e076791c909cf2e91',
+ 'episode': 'Mnet Asian Music Awards 2015 - Part 3',
+ 'episode_number': 4,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1450213200,
+ 'upload_date': '20151215',
+ 'duration': 5602,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }]
def _real_extract(self, url):
video_id = self._match_id(url).replace('/', '.')
try:
- feed = self._download_json(
- 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id,
- video_id, 'Downloading episode JSON')['channel']['item']
+ info = self._extract_feed_info(
+ 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
raise ExtractorError(
'Currently unavailable in your country.', expected=True)
raise
- media_group = feed.get('media-group', {})
-
- formats = []
- for media_content in media_group['media-content']:
- src = media_content.get('@attributes', {}).get('url')
- if not src:
- continue
- ext = determine_ext(src)
- if ext == 'f4m':
- formats.extend(self._extract_f4m_formats(
- src, video_id, f4m_id='hds'))
- elif ext == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- src, video_id, 'mp4', m3u8_id='hls'))
- else:
- formats.append({
- 'url': src,
- })
- self._sort_formats(formats)
-
- title = media_group.get('media-title')
- description = media_group.get('media-description')
- duration = int_or_none(media_group['media-content'][0].get('@attributes', {}).get('duration'))
- thumbnail = self._proto_relative_url(
- media_group.get('media-thumbnail', {}).get('@attributes', {}).get('url'))
- timestamp = parse_iso8601(feed.get('pubDate'), ' ')
-
- subtitles = {}
- for media_subtitle in media_group.get('media-subTitle', []):
- lang = media_subtitle.get('@attributes', {}).get('lang')
- href = media_subtitle.get('@attributes', {}).get('href')
- if not lang or not href:
- continue
- subtitles[lang] = [{
- 'ext': 'ttml',
- 'url': href,
- }]
-
series_id, episode_number = video_id.split('.')
episode_info = self._download_json(
# We only need a single episode's info, so restrict the page size to one episode
video_id, 'Downloading episode info JSON', fatal=False)
if episode_info:
value = episode_info.get('value')
- if value:
- subfile = value[0].get('subfile') or value[0].get('new_subfile')
- if subfile and subfile != 'http://www.dramafever.com/st/':
- subtitles.setdefault('English', []).append({
- 'ext': 'srt',
- 'url': subfile,
- })
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'timestamp': timestamp,
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles,
- }
+ if isinstance(value, list):
+ for v in value:
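+                    # pick out the entry that actually describes an episode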
+ if v.get('type') == 'Episode':
+ subfile = v.get('subfile') or v.get('new_subfile')
+ if subfile and subfile != 'http://www.dramafever.com/st/':
+ info.setdefault('subtitles', {}).setdefault('English', []).append({
+ 'ext': 'srt',
+ 'url': subfile,
+ })
+ episode_number = int_or_none(v.get('number'))
+ episode_fallback = 'Episode'
+ if episode_number:
+ episode_fallback += ' %d' % episode_number
+ info['episode'] = v.get('title') or episode_fallback
+ info['episode_number'] = episode_number
+ break
+
+ return info
class DramaFeverSeriesIE(DramaFeverBaseIE):
formats = []
for file in info['Files']:
- if info['Type'] == "Video":
+ if info['Type'] == 'Video':
if file['Type'] in video_types:
format = parse_filename_info(file['Location'])
format.update({
if '/bonanza/' in rtmp_url:
format['play_path'] = rtmp_url.split('/bonanza/')[1]
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
- elif info['Type'] == "Audio":
- if file['Type'] == "Audio":
+ elif info['Type'] == 'Audio':
+ if file['Type'] == 'Audio':
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
'vcodec': 'none',
})
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
description = '%s\n%s\n%s\n' % (
import re
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- unified_strdate,
-)
+from .zdf import ZDFIE
-class DreiSatIE(InfoExtractor):
+class DreiSatIE(ZDFIE):
IE_NAME = '3sat'
_VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
_TESTS = [
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
- details_doc = self._download_xml(details_url, video_id, 'Downloading video details')
-
- status_code = details_doc.find('./status/statuscode')
- if status_code is not None and status_code.text != 'ok':
- code = status_code.text
- if code == 'notVisibleAnymore':
- message = 'Video %s is not available' % video_id
- else:
- message = '%s returned error: %s' % (self.IE_NAME, code)
- raise ExtractorError(message, expected=True)
-
- thumbnail_els = details_doc.findall('.//teaserimage')
- thumbnails = [{
- 'width': int(te.attrib['key'].partition('x')[0]),
- 'height': int(te.attrib['key'].partition('x')[2]),
- 'url': te.text,
- } for te in thumbnail_els]
-
- information_el = details_doc.find('.//information')
- video_title = information_el.find('./title').text
- video_description = information_el.find('./detail').text
-
- details_el = details_doc.find('.//details')
- video_uploader = details_el.find('./channel').text
- upload_date = unified_strdate(details_el.find('./airtime').text)
-
- format_els = details_doc.findall('.//formitaet')
- formats = [{
- 'format_id': fe.attrib['basetype'],
- 'width': int(fe.find('./width').text),
- 'height': int(fe.find('./height').text),
- 'url': fe.find('./url').text,
- 'filesize': int(fe.find('./filesize').text),
- 'video_bitrate': int(fe.find('./videoBitrate').text),
- } for fe in format_els
- if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]
-
- self._sort_formats(formats)
-
- return {
- '_type': 'video',
- 'id': video_id,
- 'title': video_title,
- 'formats': formats,
- 'description': video_description,
- 'thumbnails': thumbnails,
- 'thumbnail': thumbnails[-1]['url'],
- 'uploader': video_uploader,
- 'upload_date': upload_date,
- }
+ return self.extract_from_xml_url(video_id, details_url)
subtitles_list = asset.get('SubtitlesList')
if isinstance(subtitles_list, list):
LANGS = {
- 'Danish': 'dk',
+ 'Danish': 'da',
}
for subs in subtitles_list:
lang = subs['Language']
IE_NAME = '8tracks'
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
_TEST = {
- "name": "EightTracks",
- "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
- "info_dict": {
+ 'name': 'EightTracks',
+ 'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
+ 'info_dict': {
'id': '1336550',
'display_id': 'youtube-dl-test-tracks-a',
- "description": "test chars: \"'/\\ä↭",
- "title": "youtube-dl test tracks \"'/\\ä↭<>",
+ 'description': "test chars: \"'/\\ä↭",
+ 'title': "youtube-dl test tracks \"'/\\ä↭<>",
},
- "playlist": [
+ 'playlist': [
{
- "md5": "96ce57f24389fc8734ce47f4c1abcc55",
- "info_dict": {
- "id": "11885610",
- "ext": "m4a",
- "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '96ce57f24389fc8734ce47f4c1abcc55',
+ 'info_dict': {
+ 'id': '11885610',
+ 'ext': 'm4a',
+ 'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4ab26f05c1f7291ea460a3920be8021f",
- "info_dict": {
- "id": "11885608",
- "ext": "m4a",
- "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4ab26f05c1f7291ea460a3920be8021f',
+ 'info_dict': {
+ 'id': '11885608',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "d30b5b5f74217410f4689605c35d1fd7",
- "info_dict": {
- "id": "11885679",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'd30b5b5f74217410f4689605c35d1fd7',
+ 'info_dict': {
+ 'id': '11885679',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4eb0a669317cd725f6bbd336a29f923a",
- "info_dict": {
- "id": "11885680",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4eb0a669317cd725f6bbd336a29f923a',
+ 'info_dict': {
+ 'id': '11885680',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1893e872e263a2705558d1d319ad19e8",
- "info_dict": {
- "id": "11885682",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1893e872e263a2705558d1d319ad19e8',
+ 'info_dict': {
+ 'id': '11885682',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "b673c46f47a216ab1741ae8836af5899",
- "info_dict": {
- "id": "11885683",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'b673c46f47a216ab1741ae8836af5899',
+ 'info_dict': {
+ 'id': '11885683',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1d74534e95df54986da7f5abf7d842b7",
- "info_dict": {
- "id": "11885684",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1d74534e95df54986da7f5abf7d842b7',
+ 'info_dict': {
+ 'id': '11885684',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
- "info_dict": {
- "id": "11885685",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
+ 'info_dict': {
+ 'id': '11885685',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
}
]
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ remove_start,
+ sanitized_Request,
+)
class EinthusanIE(InfoExtractor):
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
+ video_id = self._match_id(url)
+
+ request = sanitized_Request(url)
+ request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')
+ webpage = self._download_webpage(request, video_id)
+
+ title = self._html_search_regex(
+ r'<h1><a[^>]+class=["\']movie-title["\'][^>]*>(.+?)</a></h1>',
+ webpage, 'title')
- video_title = self._html_search_regex(
- r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')
+ video_id = self._search_regex(
+ r'data-movieid=["\'](\d+)', webpage, 'video id', default=video_id)
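+        # the geturl endpoint responds with the direct video URL as plain text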
- video_url = self._html_search_regex(
- r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
- webpage, 'video url')
+ video_url = self._download_webpage(
+ 'http://cdn.einthusan.com/geturl/%s/hd/London,Washington,Toronto,Dallas,San,Sydney/'
+ % video_id, video_id)
description = self._html_search_meta('description', webpage)
thumbnail = self._html_search_regex(
r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
webpage, 'thumbnail url', fatal=False)
if thumbnail is not None:
- thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')
+ thumbnail = compat_urlparse.urljoin(url, remove_start(thumbnail, '..'))
return {
'id': video_id,
- 'title': video_title,
+ 'title': title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
if token_data:
token = token_data.get('token')
if token:
- m3u8_formats = self._extract_m3u8_formats(
- '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False))
hds_url = media.get('HDS_SURL')
if hds_url:
- f4m_formats = self._extract_f4m_formats(
+ formats.extend(self._extract_f4m_formats(
'%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'),
- video_id, f4m_id='hds', fatal=False)
- if f4m_formats:
- formats.extend(f4m_formats)
+ video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
_VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/videos/0-ipq1gsai/',
- 'md5': '8e3c576bf2e9bfff4d76565f56f94c9c',
+ 'md5': '4294cf98bc165f218aaa0b89e0fd8042',
'info_dict': {
'id': '0_ipq1gsai',
- 'ext': 'mp4',
+ 'ext': 'mov',
'title': 'Fast Fingers of Fate',
- 'description': 'md5:587e79fbbd0d73b148bc596d99ce48e6',
+ 'description': 'md5:3539013ddcbfa64b2a6d1b38d910868a',
'timestamp': 1428035648,
'upload_date': '20150403',
'uploader_id': 'batchUser',
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
try:
- return json.loads("[{" + json_string + "}]")
+ return json.loads('[{' + json_string + '}]')
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import remove_end
class ESPNIE(InfoExtractor):
_VALID_URL = r'https?://espn\.go\.com/(?:[^/]+/)*(?P<id>[^/]+)'
- _WORKING = False
_TESTS = [{
'url': 'http://espn.go.com/video/clip?id=10365079',
'info_dict': {
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
- 'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
- 'description': '',
+ 'title': '30 for 30 Shorts: Judging Jewell',
+ 'description': None,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ # intl video, from http://www.espnfc.us/video/mls-highlights/150/video/2743663/must-see-moments-best-of-the-mls-season
+ 'url': 'http://espn.go.com/video/clip?id=2743663',
+ 'info_dict': {
+ 'id': '50NDFkeTqRHB0nXBOK-RGdSG5YQPuxHg',
+ 'ext': 'mp4',
+ 'title': 'Must-See Moments: Best of the MLS season',
},
'params': {
# m3u8 download
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(
- r'class="video-play-button"[^>]+data-id="(\d+)',
- webpage, 'video id')
+ r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)',
+ webpage, 'video id', group='id')
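+        # international videos are served through a separate CMS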
+ cms = 'espn'
+ if 'data-source="intl"' in webpage:
+ cms = 'intl'
+ player_url = 'https://espn.go.com/video/iframe/twitter/?id=%s&cms=%s' % (video_id, cms)
player = self._download_webpage(
- 'https://espn.go.com/video/iframe/twitter/?id=%s' % video_id, video_id)
+ player_url, video_id)
pcode = self._search_regex(
r'["\']pcode=([^"\']+)["\']', player, 'pcode')
- return self.url_result(
- 'ooyalaexternal:espn:%s:%s' % (video_id, pcode),
- 'OoyalaExternal')
+ title = remove_end(
+ self._og_search_title(webpage),
+ '- ESPN Video').strip()
+
+ return {
+ '_type': 'url_transparent',
+ 'url': 'ooyalaexternal:%s:%s:%s' % (cms, video_id, pcode),
+ 'ie_key': 'OoyalaExternal',
+ 'title': title,
+ }
webpage, 'duration', fatal=False))
upload_date = unified_strdate(self._html_search_meta(
- 'last-modified', webpage, 'upload date', fatal=None))
+ 'last-modified', webpage, 'upload date', fatal=False))
return {
'id': video_id,
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
- "info_dict": {
+ 'info_dict': {
'id': '5bfseWNmlds',
'ext': 'mp4',
- "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
- "uploader": "FKR.TV",
- "uploader_id": "frenchkissrecords",
- "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
- "upload_date": "20081015"
+ 'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
+ 'uploader': 'FKR.TV',
+ 'uploader_id': 'frenchkissrecords',
+ 'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
+ 'upload_date': '20081015'
},
'params': {
'skip_download': True, # This is simply YouTube
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
song_id = mobj.group('id')
- info_url = "http://ex.fm/api/v3/song/%s" % song_id
+ info_url = 'http://ex.fm/api/v3/song/%s' % song_id
info = self._download_json(info_url, song_id)['song']
song_url = info['url']
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
from .common import InfoExtractor
from ..compat import (
+ compat_etree_fromstring,
compat_http_client,
- compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
+ compat_urllib_parse_unquote_plus,
)
from ..utils import (
+ error_to_compat_str,
ExtractorError,
limit_length,
sanitized_Request,
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
- https?://(?:\w+\.)?facebook\.com/
- (?:[^#]*?\#!/)?
- (?:
- (?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
- (?:v|video_id)=|
- [^/]+/videos/(?:[^/]+/)?
- )
- (?P<id>[0-9]+)
- (?:.*)'''
+ (?:
+ https?://
+ (?:\w+\.)?facebook\.com/
+ (?:[^#]*?\#!/)?
+ (?:
+ (?:
+ video/video\.php|
+ photo\.php|
+ video\.php|
+ video/embed
+ )\?(?:.*?)(?:v|video_id)=|
+ [^/]+/videos/(?:[^/]+/)?
+ )|
+ facebook:
+ )
+ (?P<id>[0-9]+)
+ '''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
+
+ _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'
+
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'expected_warnings': [
'title'
]
+ }, {
+ 'note': 'Video with DASH manifest',
+ 'url': 'https://www.facebook.com/video.php?v=957955867617029',
+ 'md5': '54706e4db4f5ad58fbad82dde1f1213f',
+ 'info_dict': {
+ 'id': '957955867617029',
+ 'ext': 'mp4',
+ 'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
+ 'uploader': 'Demy de Zeeuw',
+ },
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
+ }, {
+ 'url': 'facebook:544765982287235',
+ 'only_matching': True,
}]
def _login(self):
return
login_page_req = sanitized_Request(self._LOGIN_URL)
- login_page_req.add_header('Cookie', 'locale=en_US')
+ self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
- self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
+ error = self._html_search_regex(
+ r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
+ login_results, 'login error', default=None, group='error')
+ if error:
+ raise ExtractorError('Unable to login: %s' % error, expected=True)
+ self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+ return
+
+ fb_dtsg = self._search_regex(
+ r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
+ h = self._search_regex(
+ r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
+
+ if not fb_dtsg or not h:
return
check_form = {
- 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
- 'h': self._search_regex(
- r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
+ 'fb_dtsg': fb_dtsg,
+ 'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
- self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
+ self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning('unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
def _real_extract(self, url):
video_id = self._match_id(url)
- url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
- webpage = self._download_webpage(url, video_id)
+ req = sanitized_Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+ req.add_header('User-Agent', self._CHROME_USER_AGENT)
+ webpage = self._download_webpage(req, video_id)
+
+ video_data = None
BEFORE = '{swf.addParam(param[0], param[1]);});\n'
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
- if not m:
+ if m:
+ data = dict(json.loads(m.group(1)))
+ params_raw = compat_urllib_parse_unquote(data['params'])
+ video_data = json.loads(params_raw)['video_data']
+
+ def video_data_list2dict(video_data):
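+            # group the flat list of streams by stream_type, mirroring the legacy dict layout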
+ ret = {}
+ for item in video_data:
+ format_id = item['stream_type']
+ ret.setdefault(format_id, []).append(item)
+ return ret
+
+ if not video_data:
+ server_js_data = self._parse_json(self._search_regex(
+ r'handleServerJS\(({.+})\);', webpage, 'server js data'), video_id)
+ for item in server_js_data.get('instances', []):
+ if item[1][0] == 'VideoConfig':
+ video_data = video_data_list2dict(item[2][0]['videoData'])
+ break
+
+ if not video_data:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
expected=True)
else:
raise ExtractorError('Cannot parse data')
- data = dict(json.loads(m.group(1)))
- params_raw = compat_urllib_parse_unquote(data['params'])
- params = json.loads(params_raw)
formats = []
- for format_id, f in params['video_data'].items():
+ for format_id, f in video_data.items():
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
'url': src,
'preference': -10 if format_id == 'progressive' else 0,
})
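+            # some videos additionally carry a URL-encoded DASH manifest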
+ dash_manifest = f[0].get('dash_manifest')
+ if dash_manifest:
+ formats.extend(self._parse_mpd_formats(
+ compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
if not formats:
raise ExtractorError('Cannot find video formats')
+ self._sort_formats(formats)
+
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
- webpage, 'alternative title', fatal=False)
+ webpage, 'alternative title', default=None)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
'formats': formats,
'uploader': uploader,
}
+
+
+class FacebookPostIE(InfoExtractor):
+ IE_NAME = 'facebook:post'
+ _VALID_URL = r'https?://(?:\w+\.)?facebook\.com/[^/]+/posts/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
+ 'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
+ 'info_dict': {
+ 'id': '544765982287235',
+ 'ext': 'mp4',
+ 'title': '"What are you doing running in the snow?"',
+ 'uploader': 'FailArmy',
+ }
+ }
+
+ def _real_extract(self, url):
+ post_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, post_id)
+
+ entries = [
+ self.url_result('facebook:%s' % video_id, FacebookIE.ie_key())
+ for video_id in self._parse_json(
+ self._search_regex(
+ r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
+ webpage, 'video ids', group='ids'),
+ post_id)]
+
+ return self.playlist_result(entries, post_id)
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import (
+ xpath_element,
+ xpath_text,
+ int_or_none,
+)
class FazIE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
+ description = self._og_search_description(webpage)
config_xml_url = self._search_regex(
- r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+ r'videoXMLURL\s*=\s*"([^"]+)', webpage, 'config xml url')
config = self._download_xml(
config_xml_url, video_id, 'Downloading config xml')
- encodings = config.find('ENCODINGS')
+ encodings = xpath_element(config, 'ENCODINGS', 'encodings', True)
formats = []
for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
- encoding = encodings.find(code)
- if encoding is None:
- continue
- encoding_url = encoding.find('FILENAME').text
- formats.append({
- 'url': encoding_url,
- 'format_id': code.lower(),
- 'quality': pref,
- })
+ encoding = xpath_element(encodings, code)
+ if encoding is not None:
+ encoding_url = xpath_text(encoding, 'FILENAME')
+ if encoding_url:
+ formats.append({
+ 'url': encoding_url,
+ 'format_id': code.lower(),
+ 'quality': pref,
+ 'tbr': int_or_none(xpath_text(encoding, 'AVERAGEBITRATE')),
+ })
self._sort_formats(formats)
- descr = self._html_search_regex(
- r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'formats': formats,
- 'description': descr,
- 'thumbnail': config.find('STILL/STILL_BIG').text,
+ 'description': description.strip() if description else None,
+ 'thumbnail': xpath_text(config, 'STILL/STILL_BIG'),
+ 'duration': int_or_none(xpath_text(config, 'DURATION')),
}
'params': {
'username': 'ytdl@yt-dl.org',
'password': '(snip)',
- 'skip': 'requires actual password'
- }
+ },
+ 'skip': 'requires actual password',
}, {
'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
'only_matching': True,
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
- "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
+ 'http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&'.
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
- ExtractorError,
+ js_to_json,
)
'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
title = clean_html(self._html_search_regex(
'<h3>([^<]+)</h3>', webpage, 'title'))
- matches = re.search(
- r'(?s)<video(?:(?!poster)[^>])+(?:poster="([^"]+)")?[^>]*>(.*)</video>',
- webpage)
- if matches is None:
- raise ExtractorError('Unable to extract the video')
-
- poster, sources = matches.groups()
- if poster is None:
- self.report_warning('unable to extract thumbnail')
-
- urls = re.findall(r'<source[^>]+src="([^"]+)"', sources)
- formats = [{
- 'url': furl,
- 'format_id': determine_ext(furl),
- } for furl in urls]
+ thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
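+    # MEDIA is a JavaScript array literal, so run it through js_to_json before parsing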
+ sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)
+
+ formats = []
+ for source in sources:
+ furl = source.get('src')
+ if furl:
+ formats.append({
+ 'url': furl,
+ 'format_id': determine_ext(furl),
+ })
+ self._sort_formats(formats)
+
return {
'id': episode,
'title': title,
'formats': formats,
- 'thumbnail': poster,
+ 'thumbnail': thumbnail,
}
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
- find_xpath_attr,
- sanitized_Request,
+ int_or_none,
+ qualities,
)
class FlickrIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
+ _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/[\w\-_@]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
- 'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
+ 'md5': '164fe3fa6c22e18d448d4d5af2330f31',
'info_dict': {
'id': '5645318632',
- 'ext': 'mp4',
- "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
- "uploader_id": "forestwander-nature-pictures",
- "title": "Dark Hollow Waterfalls"
+ 'ext': 'mpg',
+ 'description': 'Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.',
+ 'title': 'Dark Hollow Waterfalls',
+ 'duration': 19,
+ 'timestamp': 1303528740,
+ 'upload_date': '20110423',
+ 'uploader_id': '10922353@N03',
+ 'uploader': 'Forest Wander',
+ 'comment_count': int,
+ 'view_count': int,
+ 'tags': list,
}
}
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
+ _API_BASE_URL = 'https://api.flickr.com/services/rest?'
- video_id = mobj.group('id')
- video_uploader_id = mobj.group('uploader_id')
- webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
- req = sanitized_Request(webpage_url)
- req.add_header(
- 'User-Agent',
- # it needs a more recent version
- 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20150101 Firefox/38.0 (Chrome)')
- webpage = self._download_webpage(req, video_id)
+ def _call_api(self, method, video_id, api_key, note, secret=None):
+ query = {
+ 'photo_id': video_id,
+ 'method': 'flickr.%s' % method,
+ 'api_key': api_key,
+ 'format': 'json',
+ 'nojsoncallback': 1,
+ }
+ if secret:
+ query['secret'] = secret
+ data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note)
+ if data['stat'] != 'ok':
+ raise ExtractorError(data['message'])
+ return data
- secret = self._search_regex(r'secret"\s*:\s*"(\w+)"', webpage, 'secret')
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
- first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
- first_xml = self._download_xml(first_url, video_id, 'Downloading first data webpage')
+ api_key = self._download_json(
+ 'https://www.flickr.com/hermes_error_beacon.gne', video_id,
+ 'Downloading api key')['site_key']
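+        # the site key returned here doubles as the api_key for the REST calls below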
- node_id = find_xpath_attr(
- first_xml, './/{http://video.yahoo.com/YEP/1.0/}Item', 'id',
- 'id').text
+ video_info = self._call_api(
+ 'photos.getInfo', video_id, api_key, 'Downloading video info')['photo']
+ if video_info['media'] == 'video':
+ streams = self._call_api(
+ 'video.getStreamInfo', video_id, api_key,
+ 'Downloading streams info', video_info['secret'])['streams']
- second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
- second_xml = self._download_xml(second_url, video_id, 'Downloading second data webpage')
+ preference = qualities(
+ ['288p', 'iphone_wifi', '100', '300', '700', '360p', 'appletv', '720p', '1080p', 'orig'])
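+        # known stream types, ordered from lowest to highest quality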
- self.report_extraction(video_id)
+ formats = []
+ for stream in streams['stream']:
+ stream_type = str(stream.get('type'))
+ formats.append({
+ 'format_id': stream_type,
+ 'url': stream['_content'],
+ 'preference': preference(stream_type),
+ })
+ self._sort_formats(formats)
- stream = second_xml.find('.//STREAM')
- if stream is None:
- raise ExtractorError('Unable to extract video url')
- video_url = stream.attrib['APP'] + stream.attrib['FULLPATH']
+ owner = video_info.get('owner', {})
- return {
- 'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
- 'title': self._og_search_title(webpage),
- 'description': self._og_search_description(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
- 'uploader_id': video_uploader_id,
- }
+ return {
+ 'id': video_id,
+ 'title': video_info['title']['_content'],
+ 'description': video_info.get('description', {}).get('_content'),
+ 'formats': formats,
+ 'timestamp': int_or_none(video_info.get('dateuploaded')),
+ 'duration': int_or_none(video_info.get('video', {}).get('duration')),
+ 'uploader_id': owner.get('nsid'),
+ 'uploader': owner.get('realname'),
+ 'comment_count': int_or_none(video_info.get('comments', {}).get('_content')),
+ 'view_count': int_or_none(video_info.get('views')),
+ 'tags': [tag.get('_content') for tag in video_info.get('tags', {}).get('tag', [])]
+ }
+ else:
+ raise ExtractorError('not a video', expected=True)
'title': 'Schalke 04 0 – 2 Real Madrid',
},
'playlist_count': 3,
+ 'skip': 'Video for this match is not available',
}, {
'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
'info_dict': {
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import smuggle_url
+
+
+class FOXIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?fox\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.fox.com/watch/255180355939/7684182528',
+ 'md5': 'ebd296fcc41dd4b19f8115d8461a3165',
+ 'info_dict': {
+ 'id': '255180355939',
+ 'ext': 'mp4',
+ 'title': 'Official Trailer: Gotham',
+ 'description': 'Tracing the rise of the great DC Comics Super-Villains and vigilantes, Gotham reveals an entirely new chapter that has never been told.',
+ 'duration': 129,
+ },
+ 'add_ie': ['ThePlatform'],
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
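+        # appending switch=http requests HTTP-delivered streams from ThePlatform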
+ release_url = self._parse_json(self._search_regex(
+ r'"fox_pdk_player"\s*:\s*({[^}]+?})', webpage, 'fox_pdk_player'),
+ video_id)['release_url'] + '&switch=http'
+
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'ThePlatform',
+ 'url': smuggle_url(release_url, {'force_smil_url': True}),
+ 'id': video_id,
+ }
import re
-from .common import InfoExtractor
-from ..utils import (
- parse_iso8601,
- int_or_none,
-)
+from .amp import AMPIE
-class FoxNewsIE(InfoExtractor):
+class FoxNewsIE(AMPIE):
IE_DESC = 'Fox News and Fox Business Video'
_VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
_TESTS = [
'id': '3937480',
'ext': 'flv',
'title': 'Frozen in Time',
- 'description': 'Doctors baffled by 16-year-old girl that is the size of a toddler',
+ 'description': '16-year-old girl is size of toddler',
'duration': 265,
- 'timestamp': 1304411491,
- 'upload_date': '20110503',
+ # 'timestamp': 1304411491,
+ # 'upload_date': '20110503',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
'id': '3922535568001',
'ext': 'mp4',
'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal",
- 'description': "Congressman discusses the president's executive action",
+ 'description': "Congressman discusses president's plan",
'duration': 292,
- 'timestamp': 1417662047,
- 'upload_date': '20141204',
+ # 'timestamp': 1417662047,
+ # 'upload_date': '20141204',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- host = mobj.group('host')
+ host, video_id = re.match(self._VALID_URL, url).groups()
- video = self._download_json(
- 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id)
-
- item = video['channel']['item']
- title = item['title']
- description = item['description']
- timestamp = parse_iso8601(item['dc-date'])
-
- media_group = item['media-group']
- duration = None
- formats = []
- for media in media_group['media-content']:
- attributes = media['@attributes']
- video_url = attributes['url']
- if video_url.endswith('.f4m'):
- formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124', video_id))
- elif video_url.endswith('.m3u8'):
- formats.extend(self._extract_m3u8_formats(video_url, video_id, 'flv'))
- elif not video_url.endswith('.smil'):
- duration = int_or_none(attributes.get('duration'))
- formats.append({
- 'url': video_url,
- 'format_id': media['media-category']['@attributes']['label'],
- 'preference': 1,
- 'vbr': int_or_none(attributes.get('bitrate')),
- 'filesize': int_or_none(attributes.get('fileSize'))
- })
- self._sort_formats(formats)
-
- media_thumbnail = media_group['media-thumbnail']['@attributes']
- thumbnails = [{
- 'url': media_thumbnail['url'],
- 'width': int_or_none(media_thumbnail.get('width')),
- 'height': int_or_none(media_thumbnail.get('height')),
- }] if media_thumbnail else []
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'timestamp': timestamp,
- 'formats': formats,
- 'thumbnails': thumbnails,
- }
+ info = self._extract_feed_info(
+ 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id))
+ info['id'] = video_id
+ return info
from ..utils import (
determine_ext,
int_or_none,
+ ExtractorError,
)
'alt_title': 'Carnet nomade | 13-14',
'vcodec': 'none',
'upload_date': '20140301',
- 'thumbnail': r're:^http://www\.franceculture\.fr/.*/images/player/Carnet-nomade\.jpg$',
- 'description': 'startswith:Avec :Jean-Baptiste Péretié pour son documentaire sur Arte "La revanche des « geeks », une enquête menée aux Etats',
+ 'thumbnail': r're:^http://static\.franceculture\.fr/.*/images/player/Carnet-nomade\.jpg$',
+ 'description': 'startswith:Avec :Jean-Baptiste Péretié pour son documentaire sur Arte "La revanche',
'timestamp': 1393700400,
}
}
- def _real_extract(self, url):
- video_id = self._match_id(url)
+ def _extract_from_player(self, url, video_id):
webpage = self._download_webpage(url, video_id)
video_path = self._search_regex(
r'<a id="player".*?>\s+<img src="([^"]+)"',
webpage, 'thumbnail', fatal=False)
+ display_id = self._search_regex(
+ r'<span class="path-diffusion">emission-(.*?)</span>', webpage, 'display_id')
+
title = self._html_search_regex(
r'<span class="title-diffusion">(.*?)</span>', webpage, 'title')
alt_title = self._html_search_regex(
'alt_title': alt_title,
'thumbnail': thumbnail,
'description': description,
+ 'display_id': display_id,
}
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ return self._extract_from_player(url, video_id)
+
+
+class FranceCultureEmissionIE(FranceCultureIE):
+ _VALID_URL = r'https?://(?:www\.)?franceculture\.fr/emission-(?P<id>[^?#]+)'
+ _TEST = {
+ 'url': 'http://www.franceculture.fr/emission-les-carnets-de-la-creation-jean-gabriel-periot-cineaste-2015-10-13',
+ 'info_dict': {
+ 'title': 'Jean-Gabriel Périot, cinéaste',
+ 'alt_title': 'Les Carnets de la création',
+ 'id': '5093239',
+ 'display_id': 'les-carnets-de-la-creation-jean-gabriel-periot-cineaste-2015-10-13',
+ 'ext': 'mp3',
+ 'timestamp': 1444762500,
+ 'upload_date': '20151013',
+ 'description': 'startswith:Aujourd\'hui dans "Les carnets de la création", le cinéaste',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ video_path = self._html_search_regex(
+ r'<a class="rf-player-open".*?href="([^"]+)"', webpage, 'video path', 'no_path_player')
+ if video_path == 'no_path_player':
+ raise ExtractorError('no player : no sound in this page.', expected=True)
+ new_id = self._search_regex('play=(?P<id>[0-9]+)', video_path, 'new_id', group='id')
+ video_url = compat_urlparse.urljoin(url, video_path)
+ return self._extract_from_player(video_url, new_id)
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import int_or_none
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'md5': '4764932e466e6f6c79c317d2e74f6884',
- "info_dict": {
+ 'info_dict': {
'id': '793962',
'ext': 'mp3',
'title': 'L’Histoire dans les jeux vidéo',
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = 'http://www.franceinter.fr/' + path
title = self._html_search_regex(
- r'<span class="title">(.+?)</span>', webpage, 'title')
+ r'<span class="title-diffusion">(.+?)</span>', webpage, 'title')
description = self._html_search_regex(
r'<span class="description">(.*?)</span>',
webpage, 'description', fatal=False)
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id, catalogue = self._html_search_regex(
- r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
+ r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
'info_dict': {
'id': 'vysukany-zadecek-22033',
'ext': 'mp4',
- "title": "vysukany-zadecek-22033",
- "age_limit": 18,
+ 'title': 'vysukany-zadecek-22033',
+ 'age_limit': 18,
},
'skip': 'Blocked outside .cz',
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ clean_html,
+ determine_ext,
+ encode_dict,
+ int_or_none,
+ sanitized_Request,
+ ExtractorError,
+ urlencode_postdata
+)
+
+
+class FunimationIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)'
+
+ _NETRC_MACHINE = 'funimation'
+
+ _TESTS = [{
+ 'url': 'http://www.funimation.com/shows/air/videos/official/breeze',
+ 'info_dict': {
+ 'id': '658',
+ 'display_id': 'breeze',
+ 'ext': 'mp4',
+ 'title': 'Air - 1 - Breeze',
+ 'description': 'md5:1769f43cd5fc130ace8fd87232207892',
+ 'thumbnail': 're:https?://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play',
+ 'info_dict': {
+ 'id': '31128',
+ 'display_id': 'role-play',
+ 'ext': 'mp4',
+ 'title': '.hack//SIGN - 1 - Role Play',
+ 'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
+ 'thumbnail': 're:https?://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview',
+ 'info_dict': {
+ 'id': '9635',
+ 'display_id': 'broadcast-dub-preview',
+ 'ext': 'mp4',
+ 'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
+ 'description': 'md5:f8ec49c0aff702a7832cd81b8a44f803',
+ 'thumbnail': 're:https?://.*\.(?:jpg|png)',
+ },
+ }]
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+ data = urlencode_postdata(encode_dict({
+ 'email_field': username,
+ 'password_field': password,
+ }))
+ login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
+ 'Content-Type': 'application/x-www-form-urlencoded'
+ })
+ login_page = self._download_webpage(
+ login_request, None, 'Logging in as %s' % username)
+ if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')):
+ return
+ error = self._html_search_regex(
+ r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>',
+ login_page, 'error messages', default=None)
+ if error:
+ raise ExtractorError('Unable to login: %s' % error, expected=True)
+ raise ExtractorError('Unable to log in')
+
+ def _real_initialize(self):
+ self._login()
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ errors = []
+ formats = []
+
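+        # Maps error descriptions found in the page's videoErrorMessages
+        # object to the short tokens that appear in place of format URLs
+        # when playback is blocked (see the videoSet loop below)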
+ ERRORS_MAP = {
+ 'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn',
+ 'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut',
+ 'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut',
+ 'ERROR_VIDEO_EXPIRED': 'videoExpired',
+ 'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable',
+ 'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription',
+ 'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription',
+ 'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding',
+ 'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN',
+ 'ERROR_STREAM_NOT_FOUND': 'streamNotFound',
+ }
+
+ USER_AGENTS = (
+            # The PC UA is served an m3u8 that provides some bonus lower-quality formats
+ ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'),
+            # The mobile UA allows extracting direct links and also does not fail
+            # when the PC UA fails with a hulu error (e.g.
+ # http://www.funimation.com/shows/hacksign/videos/official/role-play)
+ ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'),
+ )
+
+ for kind, user_agent in USER_AGENTS:
+ request = sanitized_Request(url)
+ request.add_header('User-Agent', user_agent)
+ webpage = self._download_webpage(
+ request, display_id, 'Downloading %s webpage' % kind)
+
+ playlist = self._parse_json(
+ self._search_regex(
+ r'var\s+playersData\s*=\s*(\[.+?\]);\n',
+ webpage, 'players data'),
+ display_id)[0]['playlist']
+
+ items = next(item['items'] for item in playlist if item.get('items'))
+ item = next(item for item in items if item.get('itemAK') == display_id)
+
+ error_messages = {}
+ video_error_messages = self._search_regex(
+ r'var\s+videoErrorMessages\s*=\s*({.+?});\n',
+ webpage, 'error messages', default=None)
+ if video_error_messages:
+ error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False)
+ if error_messages_json:
+ for _, error in error_messages_json.items():
+ type_ = error.get('type')
+ description = error.get('description')
+ content = error.get('content')
+ if type_ == 'text' and description and content:
+ error_message = ERRORS_MAP.get(description)
+ if error_message:
+ error_messages[error_message] = content
+
+ for video in item.get('videoSet', []):
+ auth_token = video.get('authToken')
+ if not auth_token:
+ continue
+ funimation_id = video.get('FUNImationID') or video.get('videoId')
+ preference = 1 if video.get('languageMode') == 'dub' else 0
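+            # The auth token is appended to each format URL as its query
+            # string, so make sure it starts with '?'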
+ if not auth_token.startswith('?'):
+ auth_token = '?%s' % auth_token
+ for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)):
+ format_url = video.get('%sUrl' % quality)
+ if not format_url:
+ continue
+ if not format_url.startswith(('http', '//')):
+ errors.append(format_url)
+ continue
+ if determine_ext(format_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native',
+ preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False))
+ else:
+ tbr = int_or_none(self._search_regex(
+ r'-(\d+)[Kk]', format_url, 'tbr', default=None))
+ formats.append({
+ 'url': format_url + auth_token,
+ 'format_id': '%s-http-%dp' % (funimation_id, height),
+ 'height': height,
+ 'tbr': tbr,
+ 'preference': preference,
+ })
+
+ if not formats and errors:
+ raise ExtractorError(
+ '%s returned error: %s'
+ % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))),
+ expected=True)
+
+ self._sort_formats(formats)
+
+ title = item['title']
+ artist = item.get('artist')
+ if artist:
+ title = '%s - %s' % (artist, title)
+ description = self._og_search_description(webpage) or item.get('description')
+ thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl')
+ video_id = item.get('itemId') or display_id
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
formats = []
- m3u8_formats = self._extract_m3u8_formats(
- m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
bitrates = [int(bitrate) for bitrate in re.findall(r'[,/]v(\d+)[,/]', m3u8_url)]
bitrates.sort()
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import int_or_none
+
+
+class GameInformerIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx'
+ _TEST = {
+ 'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
+ 'info_dict': {
+ 'id': '4515472681001',
+ 'ext': 'm3u8',
+ 'title': 'Replay - Animal Crossing',
+ 'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
+ 'timestamp': 1443457610706,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ bc_api_url = self._search_regex(r"getVideo\('([^']+)'", webpage, 'brightcove api url')
+ json_data = self._download_json(
+ bc_api_url + '&video_fields=id,name,shortDescription,publishedDate,videoStillURL,length,IOSRenditions',
+ display_id)
+
+ return {
+ 'id': compat_str(json_data['id']),
+ 'display_id': display_id,
+ 'url': json_data['IOSRenditions'][0]['url'],
+ 'title': json_data['name'],
+ 'description': json_data.get('shortDescription'),
+ 'timestamp': int_or_none(json_data.get('publishedDate')),
+ 'duration': int_or_none(json_data.get('length')),
+ }
xpath_text,
xpath_with_ns,
)
+from .youtube import YoutubeIE
class GamekingsIE(InfoExtractor):
- _VALID_URL = r'http://www\.gamekings\.tv/(?:videos|nieuws)/(?P<id>[^/]+)'
+ _VALID_URL = r'http://www\.gamekings\.nl/(?:videos|nieuws)/(?P<id>[^/]+)'
_TESTS = [{
- 'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
- # MD5 is flaky, seems to change regularly
- # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3',
+ # YouTube embed video
+ 'url': 'http://www.gamekings.nl/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
+ 'md5': '5208d3a17adeaef829a7861887cb9029',
'info_dict': {
- 'id': 'phoenix-wright-ace-attorney-dual-destinies-review',
+ 'id': 'HkSQKetlGOU',
'ext': 'mp4',
- 'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
- 'description': 'md5:36fd701e57e8c15ac8682a2374c99731',
+ 'title': 'Phoenix Wright: Ace Attorney - Dual Destinies Review',
+ 'description': 'md5:db88c0e7f47e9ea50df3271b9dc72e1d',
'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader_id': 'UCJugRGo4STYMeFr5RoOShtQ',
+ 'uploader': 'Gamekings Vault',
+ 'upload_date': '20151123',
},
+ 'add_ie': ['Youtube'],
}, {
# vimeo video
- 'url': 'http://www.gamekings.tv/videos/the-legend-of-zelda-majoras-mask/',
+ 'url': 'http://www.gamekings.nl/videos/the-legend-of-zelda-majoras-mask/',
'md5': '12bf04dfd238e70058046937657ea68d',
'info_dict': {
'id': 'the-legend-of-zelda-majoras-mask',
'thumbnail': 're:^https?://.*\.jpg$',
},
}, {
- 'url': 'http://www.gamekings.tv/nieuws/gamekings-extra-shelly-en-david-bereiden-zich-voor-op-de-livestream/',
+ 'url': 'http://www.gamekings.nl/nieuws/gamekings-extra-shelly-en-david-bereiden-zich-voor-op-de-livestream/',
'only_matching': True,
}]
webpage = self._download_webpage(url, video_id)
playlist_id = self._search_regex(
- r'gogoVideo\(\s*\d+\s*,\s*"([^"]+)', webpage, 'playlist id')
+ r'gogoVideo\([^,]+,\s*"([^"]+)', webpage, 'playlist id')
+
+ # Check if a YouTube embed is used
+ if YoutubeIE.suitable(playlist_id):
+ return self.url_result(playlist_id, ie='Youtube')
playlist = self._download_xml(
'http://www.gamekings.tv/wp-content/themes/gk2010/rss_playlist.php?id=%s' % playlist_id,
from __future__ import unicode_literals
-from .mtv import MTVServicesInfoExtractor
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+ url_basename,
+)
-class GametrailersIE(MTVServicesInfoExtractor):
- _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
+class GametrailersIE(InfoExtractor):
+ _VALID_URL = r'http://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)'
+
_TEST = {
- 'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
- 'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7',
+ 'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review',
+ 'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a',
'info_dict': {
- 'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d',
+ 'id': '2983958',
'ext': 'mp4',
- 'title': 'E3 2013: Debut Trailer',
- 'description': 'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
+ 'display_id': '116437-Just-Cause-3-Review',
+ 'title': 'Just Cause 3 - Review',
+ 'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?',
},
}
- _FEED_URL = 'http://www.gametrailers.com/feeds/mrss'
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ title = self._html_search_regex(
+ r'<title>(.+?)\|', webpage, 'title').strip()
+ embed_url = self._proto_relative_url(
+ self._search_regex(
+ r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage,
+ 'embed url'),
+ scheme='http:')
+ video_id = url_basename(embed_url)
+ embed_page = self._download_webpage(embed_url, video_id)
+ embed_vars_json = self._search_regex(
+ r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page,
+ 'embed vars')
+ info = self._parse_json(embed_vars_json, video_id)
+
+ formats = []
+ for media in info['media']:
+ if media['mediaPurpose'] == 'play':
+ formats.append({
+ 'url': media['uri'],
+ 'height': media['height'],
+                    'width': media['width'],
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': info.get('thumbUri'),
+ 'description': self._og_search_description(webpage),
+ 'duration': int_or_none(info.get('videoLengthInSeconds')),
+ 'age_limit': parse_age_limit(info.get('audienceRating')),
+ }
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
-from .bliptv import BlipTVIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .snagfilms import SnagFilmsEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
from .mtv import MTVServicesEmbeddedIE
+from .pladform import PladformIE
+from .videomore import VideomoreIE
+from .googledrive import GoogleDriveIE
+from .jwplatform import JWPlatformIE
+from .digiteka import DigitekaIE
class GenericIE(InfoExtractor):
'skip_download': True,
},
},
+ # MPD from http://dash-mse-test.appspot.com/media.html
+ {
+ 'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
+ 'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
+ 'info_dict': {
+ 'id': 'car-20120827-manifest',
+ 'ext': 'mp4',
+ 'title': 'car-20120827-manifest',
+ 'formats': 'mincount:9',
+ },
+ 'params': {
+ 'format': 'bestvideo',
+ },
+ },
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
+ 'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
+ 'duration': 135.427,
},
'params': {
'skip_download': True,
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
- # Embeded Ustream video
+ # Embedded Ustream video
{
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417',
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
- 'description': 'VIDEO: Index/Match versus VLOOKUP.',
+ 'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
+ 'duration': 191.933,
},
'params': {
# m3u8 downloads
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '')
- m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+ m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>.+)$', content_type)
if m:
upload_date = unified_strdate(
head_response.headers.get('Last-Modified'))
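+            # M3U8 playlists (Content-Type .../vnd.apple.mpegurl or .../x-mpegurl)
+            # need their variant streams expanded rather than being treated as
+            # a single direct download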
+ if m.group('format_id').endswith('mpegurl'):
+ formats = self._extract_m3u8_formats(url, video_id, 'mp4')
+ else:
+ formats = [{
+ 'format_id': m.group('format_id'),
+ 'url': url,
+ 'vcodec': 'none' if m.group('type') == 'audio' else None
+ }]
return {
'id': video_id,
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
'direct': True,
- 'formats': [{
- 'format_id': m.group('format_id'),
- 'url': url,
- 'vcodec': 'none' if m.group('type') == 'audio' else None
- }],
+ 'formats': formats,
'upload_date': upload_date,
}
self.report_extraction(video_id)
- # Is it an RSS feed, a SMIL file or a XSPF playlist?
+        # Is it an RSS feed, a SMIL file, an XSPF playlist or an MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._parse_smil(doc, url, video_id)
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
+ elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
+ return {
+ 'id': video_id,
+ 'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
+ 'formats': self._parse_mpd_formats(
+ doc, video_id, mpd_base_url=url.rpartition('/')[0]),
+ }
except compat_xml_parse_error:
pass
# Look for embedded Dailymotion player
matches = re.findall(
- r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
+ r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
'id': match.group('id')
}
- # Look for embedded blip.tv player
- bliptv_url = BlipTVIE._extract_url(webpage)
- if bliptv_url:
- return self.url_result(bliptv_url, 'BlipTV')
-
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
- return OoyalaIE._build_url_result(mobj.group('ec'))
+ return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
- embeds, getter=lambda v: OoyalaIE._url_for_embed_code(v['provider_video_id']), ie='Ooyala')
+ embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
+ # Look for embedded Odnoklassniki player
+ mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'), 'Odnoklassniki')
+
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
if myvi_url:
return self.url_result(myvi_url)
- # Look for embeded soundcloud player
+ # Look for embedded soundcloud player
mobj = re.search(
r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage)
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
- mobj = re.search(
- r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage)
- if mobj is not None:
- return self.url_result(mobj.group('url'), 'Pladform')
+ pladform_url = PladformIE._extract_url(webpage)
+ if pladform_url:
+ return self.url_result(pladform_url)
+
+ # Look for Videomore embeds
+ videomore_url = VideomoreIE._extract_url(webpage)
+ if videomore_url:
+ return self.url_result(videomore_url)
# Look for Playwire embeds
mobj = re.search(
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
+ # Look for Google Drive embeds
+ google_drive_url = GoogleDriveIE._extract_url(webpage)
+ if google_drive_url:
+ return self.url_result(google_drive_url, 'GoogleDrive')
+
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if snagfilms_url:
return self.url_result(snagfilms_url)
+ # Look for JWPlatform embeds
+ jwplatform_url = JWPlatformIE._extract_url(webpage)
+ if jwplatform_url:
+ return self.url_result(jwplatform_url, 'JWPlatform')
+
# Look for ScreenwaveMedia embeds
mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
if mobj is not None:
return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
+ # Look for Digiteka embeds
+ digiteka_url = DigitekaIE._extract_url(webpage)
+ if digiteka_url:
+ return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
+
+ # Look for Limelight embeds
+ mobj = re.search(r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage)
+ if mobj:
+ lm = {
+ 'Media': 'media',
+ 'Channel': 'channel',
+ 'ChannelList': 'channel_list',
+ }
+ return self.url_result('limelight:%s:%s' % (
+ lm[mobj.group(1)], mobj.group(2)), 'Limelight%s' % mobj.group(1), mobj.group(2))
+
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
+ elif ext == 'mpd':
+ entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
else:
entry_info_dict['url'] = video_url
'only_matching': True,
}]
- class MD5:
+ class MD5(object):
HEX_FORMAT_LOWERCASE = 0
HEX_FORMAT_UPPERCASE = 1
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
resource_url = resource['url']
signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):
- m3u8_formats = self._extract_m3u8_formats(
+ formats.extend(self._extract_m3u8_formats(
signed_url, resource_id, 'mp4', entry_protocol='m3u8_native',
- m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ m3u8_id='hls', fatal=False))
else:
formats.append({
'url': signed_url,
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+)
+
+
+class GoogleDriveIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:(?:docs|drive)\.google\.com/(?:uc\?.*?id=|file/d/)|video\.google\.com/get_player\?.*?docid=)(?P<id>[a-zA-Z0-9_-]{28})'
+ _TEST = {
+ 'url': 'https://drive.google.com/file/d/0ByeS4oOUV-49Zzh4R1J6R09zazQ/edit?pli=1',
+ 'md5': '881f7700aec4f538571fa1e0eed4a7b6',
+ 'info_dict': {
+ 'id': '0ByeS4oOUV-49Zzh4R1J6R09zazQ',
+ 'ext': 'mp4',
+ 'title': 'Big Buck Bunny.mp4',
+ 'duration': 46,
+ }
+ }
+ _FORMATS_EXT = {
+ '5': 'flv',
+ '6': 'flv',
+ '13': '3gp',
+ '17': '3gp',
+ '18': 'mp4',
+ '22': 'mp4',
+ '34': 'flv',
+ '35': 'flv',
+ '36': '3gp',
+ '37': 'mp4',
+ '38': 'mp4',
+ '43': 'webm',
+ '44': 'webm',
+ '45': 'webm',
+ '46': 'webm',
+ '59': 'mp4',
+ }
+
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<iframe[^>]+src="https?://(?:video\.google\.com/get_player\?.*?docid=|(?:docs|drive)\.google\.com/file/d/)(?P<id>[a-zA-Z0-9_-]{28})',
+ webpage)
+ if mobj:
+ return 'https://drive.google.com/file/d/%s' % mobj.group('id')
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(
+ 'http://docs.google.com/file/d/%s' % video_id, video_id, encoding='unicode_escape')
+
+ reason = self._search_regex(r'"reason"\s*,\s*"([^"]+)', webpage, 'reason', default=None)
+ if reason:
+ raise ExtractorError(reason)
+
+ title = self._search_regex(r'"title"\s*,\s*"([^"]+)', webpage, 'title')
+ duration = int_or_none(self._search_regex(
+ r'"length_seconds"\s*,\s*"([^"]+)', webpage, 'length seconds', default=None))
+ fmt_stream_map = self._search_regex(
+ r'"fmt_stream_map"\s*,\s*"([^"]+)', webpage, 'fmt stream map').split(',')
+ fmt_list = self._search_regex(r'"fmt_list"\s*,\s*"([^"]+)', webpage, 'fmt_list').split(',')
+
+ formats = []
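+        # fmt_list entries look like '18/640x360/9/0/115' (itag/WIDTHxHEIGHT/...)
+        # and fmt_stream_map entries like '18|http://...' (itag|url), so zipping
+        # them pairs each stream URL with its resolution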
+ for fmt, fmt_stream in zip(fmt_list, fmt_stream_map):
+ fmt_id, fmt_url = fmt_stream.split('|')
+ resolution = fmt.split('/')[1]
+ width, height = resolution.split('x')
+ formats.append({
+ 'url': fmt_url,
+ 'format_id': fmt_id,
+ 'resolution': resolution,
+ 'width': int_or_none(width),
+ 'height': int_or_none(height),
+ 'ext': self._FORMATS_EXT[fmt_id],
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': self._og_search_thumbnail(webpage, default=None),
+ 'duration': duration,
+ 'formats': formats,
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ xpath_element,
+ xpath_text,
+ int_or_none,
+ parse_duration,
+)
+
+
+class GPUTechConfIE(InfoExtractor):
+ _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html'
+ _TEST = {
+ 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html',
+ 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798',
+ 'info_dict': {
+ 'id': '5156',
+ 'ext': 'mp4',
+ 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis',
+ 'duration': 1219,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ root_path = self._search_regex(r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', 'http://evt.dispeak.com/nvidia/events/gtc15/')
+ xml_file_id = self._search_regex(r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id')
+
+ doc = self._download_xml('%sxml/%s.xml' % (root_path, xml_file_id), video_id)
+
+ metadata = xpath_element(doc, 'metadata')
+ http_host = xpath_text(metadata, 'httpHost', 'http host', True)
+ mbr_videos = xpath_element(metadata, 'MBRVideos')
+
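+        # Each <MBRVideo> provides a <streamName> (with an RTMP-style 'mp4:'
+        # prefix, stripped below for the plain HTTP URL) and a <bitrate>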
+ formats = []
+ for mbr_video in mbr_videos.findall('MBRVideo'):
+ stream_name = xpath_text(mbr_video, 'streamName')
+ if stream_name:
+ formats.append({
+ 'url': 'http://%s/%s' % (http_host, stream_name.replace('mp4:', '')),
+ 'tbr': int_or_none(xpath_text(mbr_video, 'bitrate')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': xpath_text(metadata, 'title'),
+ 'duration': parse_duration(xpath_text(metadata, 'endTime')),
+ 'creator': xpath_text(metadata, 'speaker'),
+ 'formats': formats,
+ }
'id': 'tubGNycTo_9Uxg82uESj4i61EYX8nyuf',
'ext': 'mp4',
'title': 'Bikram Yoga Huntington Beach | Orange County',
+ 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+ 'duration': 44.961,
},
}],
'params': {
'info_dict': {
'id': 'inyouchuu-etsu-bonus',
'ext': 'mp4',
- "title": "Inyouchuu Etsu Bonus",
- "age_limit": 18,
+ 'title': 'Inyouchuu Etsu Bonus',
+ 'age_limit': 18,
}
}
+++ /dev/null
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from ..utils import smuggle_url
-
-
-class HistoryIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?history\.com/(?:[^/]+/)+(?P<id>[^/]+?)(?:$|[?#])'
-
- _TESTS = [{
- 'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
- 'md5': '6fe632d033c92aa10b8d4a9be047a7c5',
- 'info_dict': {
- 'id': 'bLx5Dv5Aka1G',
- 'ext': 'mp4',
- 'title': "Bet You Didn't Know: Valentine's Day",
- 'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
- },
- 'add_ie': ['ThePlatform'],
- }]
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
-
- webpage = self._download_webpage(url, video_id)
-
- video_url = self._search_regex(
- r'data-href="[^"]*/%s"[^>]+data-release-url="([^"]+)"' % video_id,
- webpage, 'video url')
-
- return self.url_result(smuggle_url(video_url, {'sig': {'key': 'crazyjava', 'secret': 's3cr3t'}}))
cdns = player_config.get('cdns')
servers = []
for cdn in cdns:
+ # Subscribe URLs are not playable
+ if cdn.get('rtmpSubscribe') is True:
+ continue
base_url = cdn.get('netConnectionUrl')
host = re.search('.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1)
if base_url not in servers:
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ determine_ext,
+ int_or_none,
+)
+
+
+class HotStarIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
+ _TESTS = [{
+ 'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273',
+ 'info_dict': {
+ 'id': '1000076273',
+ 'ext': 'mp4',
+ 'title': 'On Air With AIB - English',
+ 'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
+ 'timestamp': 1447227000,
+ 'upload_date': '20151111',
+ 'duration': 381,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.hotstar.com/sports/cricket/rajitha-sizzles-on-debut-with-329/2001477583',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.hotstar.com/1000000515',
+ 'only_matching': True,
+ }]
+
+ _GET_CONTENT_TEMPLATE = 'http://account.hotstar.com/AVS/besc?action=GetAggregatedContentDetails&channel=PCTV&contentId=%s'
+ _GET_CDN_TEMPLATE = 'http://getcdn.hotstar.com/AVS/besc?action=GetCDN&asJson=Y&channel=%s&id=%s&type=%s'
+
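+    # AVS endpoints wrap their payload as {resultCode, resultObj,
+    # errorDescription}; unwrap it here so callers only deal with resultObj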
+ def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True):
+ json_data = super(HotStarIE, self)._download_json(url_or_request, video_id, note, fatal=fatal)
+ if json_data['resultCode'] != 'OK':
+ if fatal:
+ raise ExtractorError(json_data['errorDescription'])
+ return None
+ return json_data['resultObj']
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ video_data = self._download_json(
+ self._GET_CONTENT_TEMPLATE % video_id,
+ video_id)['contentInfo'][0]
+
+ formats = []
+        # Only the TABLET profile is requested; the PCTV profile would return
+        # an f4m manifest, which produces broken files (see below)
+ for f in ('TABLET',):
+ format_data = self._download_json(
+ self._GET_CDN_TEMPLATE % (f, video_id, 'VOD'),
+ video_id, 'Downloading %s JSON metadata' % f, fatal=False)
+ if format_data:
+ format_url = format_data['src']
+ ext = determine_ext(format_url)
+ if ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
+ elif ext == 'f4m':
+                    # f4m manifests produce broken files, skip them
+ continue
+ else:
+ formats.append({
+ 'url': format_url,
+ 'width': int_or_none(format_data.get('width')),
+ 'height': int_or_none(format_data.get('height')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': video_data['episodeTitle'],
+ 'description': video_data.get('description'),
+ 'duration': int_or_none(video_data.get('duration')),
+ 'timestamp': int_or_none(video_data.get('broadcastDate')),
+ 'formats': formats,
+ }
'description': 'md5:dbe792e5f6f1489027027bf2eba188a3',
'timestamp': 1276081287,
'upload_date': '20100609',
+ 'duration': 56.823,
},
'params': {
# m3u8 download
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
- data_encoded = compat_urllib_parse.urlencode(data)
- complete_url = url + "?" + data_encoded
- request = sanitized_Request(complete_url)
+ request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data))
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
- cookie = urlh.headers.get('Set-Cookie', '')
html_tracks = self._html_search_regex(
- r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>',
+ r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>',
response, 'tracks')
try:
track_list = json.loads(html_tracks)
key = track['key']
track_id = track['id']
- artist = track['artist']
title = track['song']
- serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
request = sanitized_Request(
- serve_url, '', {'Content-Type': 'application/json'})
- request.add_header('cookie', cookie)
+ 'http://hypem.com/serve/source/%s/%s' % (track_id, key),
+ '', {'Content-Type': 'application/json'})
song_data = self._download_json(request, track_id, 'Downloading metadata')
- final_url = song_data["url"]
+ final_url = song_data['url']
+ artist = track.get('artist')
return {
'id': track_id,
import re
from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
class IGNIE(InfoExtractor):
Some videos of it.ign.com are also supported
"""
- _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
+ _VALID_URL = r'https?://.+?\.ign\.com/(?:[^/]+/)?(?P<type>videos|show_videos|articles|feature|(?:[^/]+/\d+/video))(/.+)?/(?P<name_or_id>.+)'
IE_NAME = 'ign.com'
- _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
- _DESCRIPTION_RE = [
- r'<span class="page-object-description">(.+?)</span>',
- r'id="my_show_video">.*?<p>(.*?)</p>',
- r'<meta name="description" content="(.*?)"',
- ]
+ _API_URL_TEMPLATE = 'http://apis.ign.com/video/v3/videos/%s'
+ _EMBED_RE = r'<iframe[^>]+?["\']((?:https?:)?//.+?\.ign\.com.+?/embed.+?)["\']'
_TESTS = [
{
'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
- 'md5': 'eac8bdc1890980122c3b66f14bdd02e9',
+ 'md5': 'febda82c4bafecd2d44b6e1a18a595f8',
'info_dict': {
'id': '8f862beef863986b2785559b9e1aa599',
'ext': 'mp4',
'title': 'The Last of Us Review',
'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
+ 'timestamp': 1370440800,
+ 'upload_date': '20130605',
+ 'uploader_id': 'cberidon@ign.com',
}
},
{
'ext': 'mp4',
'title': 'GTA 5 Video Review',
'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
+ 'timestamp': 1379339880,
+ 'upload_date': '20130916',
+ 'uploader_id': 'danieljkrupa@gmail.com',
},
},
{
'ext': 'mp4',
'title': '26 Twisted Moments from GTA 5 in Slow Motion',
'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
+ 'timestamp': 1386878820,
+ 'upload_date': '20131212',
+ 'uploader_id': 'togilvie@ign.com',
},
},
],
'id': '078fdd005f6d3c02f63d795faa1b984f',
'ext': 'mp4',
'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
- 'description': (
- 'Giant skeletons, bloody hunts, and captivating'
- ' natural beauty take our breath away.'
- ),
+ 'description': 'Brian and Jared explore Michel Ancel\'s captivating new preview.',
+ 'timestamp': 1408047180,
+ 'upload_date': '20140814',
+ 'uploader_id': 'jamesduggan1990@gmail.com',
},
},
+ {
+ 'url': 'http://me.ign.com/en/videos/112203/video/how-hitman-aims-to-be-different-than-every-other-s',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://me.ign.com/ar/angry-birds-2/106533/video/lrd-ldyy-lwl-lfylm-angry-birds',
+ 'only_matching': True,
+ },
]
def _find_video_id(self, webpage):
r'<object id="vid_(.+?)"',
r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
]
- return self._search_regex(res_id, webpage, 'video id')
+ return self._search_regex(res_id, webpage, 'video id', default=None)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
webpage = self._download_webpage(url, name_or_id)
if page_type != 'video':
multiple_urls = re.findall(
- '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
+ r'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
}
video_id = self._find_video_id(webpage)
- result = self._get_video_info(video_id)
- description = self._html_search_regex(self._DESCRIPTION_RE,
- webpage, 'video description', flags=re.DOTALL)
- result['description'] = description
- return result
+ if not video_id:
+ return self.url_result(self._search_regex(
+ self._EMBED_RE, webpage, 'embed url'))
+ return self._get_video_info(video_id)
def _get_video_info(self, video_id):
- config_url = self._CONFIG_URL_TEMPLATE % video_id
- config = self._download_json(config_url, video_id)
- media = config['playlist']['media']
+ api_data = self._download_json(
+ self._API_URL_TEMPLATE % video_id, video_id)
+
+ formats = []
+ m3u8_url = api_data['refs'].get('m3uUrl')
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+ f4m_url = api_data['refs'].get('f4mUrl')
+ if f4m_url:
+ formats.extend(self._extract_f4m_formats(
+ f4m_url, video_id, f4m_id='hds', fatal=False))
+ for asset in api_data['assets']:
+ formats.append({
+ 'url': asset['url'],
+ 'tbr': asset.get('actual_bitrate_kbps'),
+ 'fps': asset.get('frame_rate'),
+ 'height': int_or_none(asset.get('height')),
+ 'width': int_or_none(asset.get('width')),
+ })
+ self._sort_formats(formats)
+
+ thumbnails = [{
+ 'url': thumbnail['url']
+ } for thumbnail in api_data.get('thumbnails', [])]
+
+ metadata = api_data['metadata']
return {
- 'id': media['metadata']['videoId'],
- 'url': media['url'],
- 'title': media['metadata']['title'],
- 'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
+ 'id': api_data.get('videoId') or video_id,
+        'title': metadata.get('longTitle') or metadata.get('name') or metadata['title'],
+ 'description': metadata.get('description'),
+ 'timestamp': parse_iso8601(metadata.get('publishDate')),
+ 'duration': int_or_none(metadata.get('duration')),
+ 'display_id': metadata.get('slug') or video_id,
+ 'uploader_id': metadata.get('creator'),
+ 'thumbnails': thumbnails,
+ 'formats': formats,
}
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
IE_NAME = '1up.com'
- _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
-
_TESTS = [{
'url': 'http://gamevideos.1up.com/video/id/34976.html',
- 'md5': '68a54ce4ebc772e4b71e3123d413163d',
+ 'md5': 'c9cc69e07acb675c31a16719f909e347',
'info_dict': {
'id': '34976',
'ext': 'mp4',
'title': 'Sniper Elite V2 - Trailer',
- 'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
+ 'description': 'md5:bf0516c5ee32a3217aa703e9b1bc7826',
+ 'timestamp': 1313099220,
+ 'upload_date': '20110811',
+ 'uploader_id': 'IGN',
}
}]
result = super(OneUPIE, self)._real_extract(url)
result['id'] = mobj.group('name_or_id')
return result
+
+
+class PCMagIE(IGNIE):
+ _VALID_URL = r'https?://(?:www\.)?pcmag\.com/(?P<type>videos|article2)(/.+)?/(?P<name_or_id>.+)'
+ IE_NAME = 'pcmag'
+
+ _EMBED_RE = r'iframe.setAttribute\("src",\s*__util.objToUrlString\("http://widgets\.ign\.com/video/embed/content.html?[^"]*url=([^"]+)["&]'
+
+ _TESTS = [{
+ 'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data',
+ 'md5': '212d6154fd0361a2781075f1febbe9ad',
+ 'info_dict': {
+ 'id': 'ee10d774b508c9b8ec07e763b9125b91',
+ 'ext': 'mp4',
+ 'title': '010615_What\'s New Now: Is GoGo Snooping on Your Data?',
+ 'description': 'md5:a7071ae64d2f68cc821c729d4ded6bb3',
+ 'timestamp': 1420571160,
+ 'upload_date': '20150106',
+ 'uploader_id': 'cozzipix@gmail.com',
+ }
+ }, {
+ 'url': 'http://www.pcmag.com/article2/0,2817,2470156,00.asp',
+ 'md5': '94130c1ca07ba0adb6088350681f16c1',
+ 'info_dict': {
+ 'id': '042e560ba94823d43afcb12ddf7142ca',
+ 'ext': 'mp4',
+ 'title': 'HTC\'s Weird New Re Camera - What\'s New Now',
+ 'description': 'md5:53433c45df96d2ea5d0fda18be2ca908',
+ 'timestamp': 1412953920,
+ 'upload_date': '20141010',
+ 'uploader_id': 'chris_snyder@pcmag.com',
+ }
+ }]
class ImgurIE(InfoExtractor):
- _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!gallery)(?P<id>[a-zA-Z0-9]+)'
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:gallery|topic/[^/]+)/)?(?P<id>[a-zA-Z0-9]{6,})(?:[/?#&]+|\.[a-z]+)?$'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
- 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
+ 'description': 'Imgur: The most awesome images on the Internet.',
},
}, {
'url': 'https://imgur.com/A61SaA1',
'id': 'A61SaA1',
'ext': 'mp4',
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
- 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
+ 'description': 'Imgur: The most awesome images on the Internet.',
},
+ }, {
+ 'url': 'https://imgur.com/gallery/YcAQlkx',
+ 'info_dict': {
+ 'id': 'YcAQlkx',
+ 'ext': 'mp4',
+ 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
+            'description': 'Imgur: The most awesome images on the Internet.',
+        }
+ }, {
+ 'url': 'http://imgur.com/topic/Funny/N8rOudd',
+ 'only_matching': True,
}]
def _real_extract(self, url):
class ImgurAlbumIE(InfoExtractor):
- _VALID_URL = r'https?://(?:i\.)?imgur\.com/gallery/(?P<id>[a-zA-Z0-9]+)'
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:a|gallery|topic/[^/]+)/)?(?P<id>[a-zA-Z0-9]{5})(?:[/?#&]+)?$'
- _TEST = {
+ _TESTS = [{
'url': 'http://imgur.com/gallery/Q95ko',
'info_dict': {
'id': 'Q95ko',
},
'playlist_count': 25,
- }
+ }, {
+ 'url': 'http://imgur.com/a/j6Orj',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://imgur.com/topic/Aww/ll5Vk',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
album_id = self._match_id(url)
album_images = self._download_json(
'http://imgur.com/gallery/%s/album_images/hit.json?all=true' % album_id,
- album_id)['data']['images']
-
- entries = [
- self.url_result('http://imgur.com/%s' % image['hash'])
- for image in album_images if image.get('hash')]
-
- return self.playlist_result(entries, album_id)
+ album_id, fatal=False)
+
+ if album_images:
+ data = album_images.get('data')
+ if data and isinstance(data, dict):
+ images = data.get('images')
+ if images and isinstance(images, list):
+ entries = [
+ self.url_result('http://imgur.com/%s' % image['hash'])
+ for image in images if image.get('hash')]
+ return self.playlist_result(entries, album_id)
+
+ # Fallback to single video
+ return self.url_result('http://imgur.com/%s' % album_id, ImgurIE.ie_key())
+# coding: utf-8
+
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
- compat_urlparse,
+ compat_parse_qs,
)
+from ..utils import determine_ext
class InfoQIE(InfoExtractor):
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
- 'id': '12-jan-pythonthings',
+ 'id': 'A-Few-of-My-Favorite-Python-Things',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
+ }, {
+ 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',
+ 'md5': '4918d0cca1497f2244572caf626687ef',
+ 'info_dict': {
+ 'id': 'openstack-continued-delivery',
+ 'title': 'OpenStack持续交付之路',
+ 'ext': 'flv',
+ 'description': 'md5:308d981fb28fa42f49f9568322c683ff',
+ },
}]
- def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ def _extract_bokecc_videos(self, webpage, video_id):
+ # TODO: bokecc.com is a Chinese video cloud platform
+ # It should have an independent extractor but I don't have other
+ # examples using bokecc
+ player_params_str = self._html_search_regex(
+ r'<script[^>]+src="http://p\.bokecc\.com/player\?([^"]+)',
+ webpage, 'player params', default=None)
- video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
- video_description = self._html_search_meta('description', webpage, 'description')
+ player_params = compat_parse_qs(player_params_str)
+
+ info_xml = self._download_xml(
+ 'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % (
+ player_params['siteid'][0], player_params['vid'][0]), video_id)
+
+ return [{
+ 'format_id': 'bokecc',
+ 'url': quality.find('./copy').attrib['playurl'],
+ 'preference': int(quality.attrib['value']),
+ } for quality in info_xml.findall('./video/quality')]
+ def _extract_rtmp_videos(self, webpage):
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
- r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
+ r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)
+
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
- video_filename = playpath.split('/')[-1]
- video_id, extension = video_filename.split('.')
-
- http_base = self._search_regex(
- r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
- 'HTTP base URL')
-
- formats = [{
+ return [{
'format_id': 'rtmp',
'url': video_url,
- 'ext': extension,
+ 'ext': determine_ext(playpath),
'play_path': playpath,
- }, {
+ }]
+
+ def _extract_http_videos(self, webpage):
+ http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL')
+
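+        # The HTTP downloads are served through CloudFront; the InfoQConstants
+        # scp/scs/sck values hold the signed-cookie triple (Policy, Signature,
+        # Key-Pair-Id) that CloudFront expects in the Cookie header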
+ policy = self._search_regex(r'InfoQConstants.scp\s*=\s*\'([^\']+)\'', webpage, 'policy')
+ signature = self._search_regex(r'InfoQConstants.scs\s*=\s*\'([^\']+)\'', webpage, 'signature')
+ key_pair_id = self._search_regex(r'InfoQConstants.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id')
+
+ return [{
'format_id': 'http',
- 'url': compat_urlparse.urljoin(url, http_base) + real_id,
+ 'url': http_video_url,
+ 'http_headers': {
+ 'Cookie': 'CloudFront-Policy=%s; CloudFront-Signature=%s; CloudFront-Key-Pair-Id=%s' % (
+ policy, signature, key_pair_id),
+ },
}]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+ video_description = self._html_search_meta('description', webpage, 'description')
+
+ if '/cn/' in url:
+ # for China videos, HTTP video URL exists but always fails with 403
+ formats = self._extract_bokecc_videos(webpage, video_id)
+ else:
+ formats = self._extract_rtmp_videos(webpage) + self._extract_http_videos(webpage)
+
self._sort_formats(formats)
return {
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
}
+ }, {
+ # missing description
+ 'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
+ 'info_dict': {
+ 'id': 'BA-pQFBG8HZ',
+ 'ext': 'mp4',
+ 'uploader_id': 'britneyspears',
+ 'title': 'Video by britneyspears',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
webpage = self._download_webpage(url, video_id)
uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
webpage, 'uploader id', fatal=False)
- desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
- fatal=False)
+ desc = self._search_regex(
+ r'"caption":"(.+?)"', webpage, 'description', default=None)
return {
'id': video_id,
class InstagramUserIE(InfoExtractor):
- _VALID_URL = r'https://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
+ _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
from __future__ import unicode_literals
import re
-from random import random
-from math import floor
+import time
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
- remove_end,
sanitized_Request,
)
class IPrimaIE(InfoExtractor):
- _VALID_URL = r'https?://play\.iprima\.cz/(?:[^/]+/)*(?P<id>[^?#]+)'
+ _VALID_URL = r'https?://play\.iprima\.cz/(?:.+/)?(?P<id>[^?#]+)'
_TESTS = [{
- 'url': 'http://play.iprima.cz/particka/particka-92',
+ 'url': 'http://play.iprima.cz/gondici-s-r-o-33',
'info_dict': {
- 'id': '39152',
- 'ext': 'flv',
- 'title': 'Partička (92)',
- 'description': 'md5:74e9617e51bca67c3ecfb2c6f9766f45',
- 'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
+ 'id': 'p136534',
+ 'ext': 'mp4',
+ 'title': 'Gondíci s. r. o. (34)',
+ 'description': 'md5:16577c629d006aa91f59ca8d8e7f99bd',
},
'params': {
- 'skip_download': True, # requires rtmpdump
+ 'skip_download': True, # m3u8 download
},
}, {
- 'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
- 'info_dict': {
- 'id': '9718337',
- 'ext': 'flv',
- 'title': 'Tchibo Partička - Jarní móda',
- 'thumbnail': 're:^http:.*\.jpg$',
- },
- 'params': {
- 'skip_download': True, # requires rtmpdump
- },
- }, {
- 'url': 'http://play.iprima.cz/zpravy-ftv-prima-2752015',
+ 'url': 'http://play.iprima.cz/particka/particka-92',
'only_matching': True,
}]
webpage = self._download_webpage(url, video_id)
- if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
- raise ExtractorError(
- '%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)
+ video_id = self._search_regex(r'data-product="([^"]+)">', webpage, 'real id')
- player_url = (
- 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
- (floor(random() * 1073741824), floor(random() * 1073741824))
- )
-
- req = sanitized_Request(player_url)
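+        # The prehravac/init endpoint returns the player markup that embeds
+        # the HLS playlist URL extracted below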
+ req = sanitized_Request(
+ 'http://play.iprima.cz/prehravac/init?_infuse=1'
+ '&_ts=%s&productId=%s' % (round(time.time()), video_id))
req.add_header('Referer', url)
- playerpage = self._download_webpage(req, video_id)
-
- base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])
-
- zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
- if zoneGEO != '0':
- base_url = base_url.replace('token', 'token_' + zoneGEO)
-
- formats = []
- for format_id in ['lq', 'hq', 'hd']:
- filename = self._html_search_regex(
- r'"%s_id":(.+?),' % format_id, webpage, 'filename')
-
- if filename == 'null':
- continue
-
- real_id = self._search_regex(
- r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
- filename, 'real video id')
+ playerpage = self._download_webpage(req, video_id, note='Downloading player')
- if format_id == 'lq':
- quality = 0
- elif format_id == 'hq':
- quality = 1
- elif format_id == 'hd':
- quality = 2
- filename = 'hq/' + filename
+ m3u8_url = self._search_regex(r"'src': '([^']+\.m3u8)'", playerpage, 'm3u8 url')
- formats.append({
- 'format_id': format_id,
- 'url': base_url,
- 'quality': quality,
- 'play_path': 'mp4:' + filename.replace('"', '')[:-4],
- 'rtmp_live': True,
- 'ext': 'flv',
- })
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
self._sort_formats(formats)
return {
- 'id': real_id,
- 'title': remove_end(self._og_search_title(webpage), ' | Prima PLAY'),
+ 'id': video_id,
+ 'title': self._og_search_title(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'formats': formats,
- 'description': self._search_regex(
- r'<p[^>]+itemprop="description"[^>]*>([^<]+)',
- webpage, 'description', default=None),
+ 'description': self._og_search_description(webpage),
}
from __future__ import unicode_literals
import hashlib
+import itertools
import math
+import os
import random
+import re
import time
import uuid
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
-from ..utils import ExtractorError
+from ..compat import (
+ compat_parse_qs,
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_parse_urlparse,
+)
+from ..utils import (
+ ExtractorError,
+ ohdave_rsa_encrypt,
+ remove_start,
+ sanitized_Request,
+ urlencode_postdata,
+ url_basename,
+)
+
+
+def md5_text(text):
+ return hashlib.md5(text.encode('utf-8')).hexdigest()
+
+
+class IqiyiSDK(object):
+ def __init__(self, target, ip, timestamp):
+ self.target = target
+ self.ip = ip
+ self.timestamp = timestamp
+
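+    # Arithmetic helpers mirroring the obfuscated JS SDK, e.g.
+    # split_sum('1f') == '16' (0x1 + 0xf) and digit_sum(1424) == '11'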
+ @staticmethod
+ def split_sum(data):
+ return compat_str(sum(map(lambda p: int(p, 16), list(data))))
+
+ @staticmethod
+ def digit_sum(num):
+ if isinstance(num, int):
+ num = compat_str(num)
+ return compat_str(sum(map(int, num)))
+
+ def even_odd(self):
+ even = self.digit_sum(compat_str(self.timestamp)[::2])
+ odd = self.digit_sum(compat_str(self.timestamp)[1::2])
+ return even, odd
+
+ def preprocess(self, chunksize):
+ self.target = md5_text(self.target)
+ chunks = []
+ for i in range(32 // chunksize):
+ chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
+ if 32 % chunksize:
+ chunks.append(self.target[32 - 32 % chunksize:])
+ return chunks, list(map(int, self.ip.split('.')))
+
+ def mod(self, modulus):
+ chunks, ip = self.preprocess(32)
+ self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))
+
+ def split(self, chunksize):
+ modulus_map = {
+ 4: 256,
+ 5: 10,
+ 8: 100,
+ }
+
+ chunks, ip = self.preprocess(chunksize)
+ ret = ''
+ for i in range(len(chunks)):
+ ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
+ if chunksize == 8:
+ ret += ip_part + chunks[i]
+ else:
+ ret += chunks[i] + ip_part
+ self.target = ret
+
+ def handle_input16(self):
+ self.target = md5_text(self.target)
+ self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])
+
+ def handle_input8(self):
+ self.target = md5_text(self.target)
+ ret = ''
+ for i in range(4):
+ part = self.target[8 * i:8 * (i + 1)]
+ ret += self.split_sum(part) + part
+ self.target = ret
+
+ def handleSum(self):
+ self.target = md5_text(self.target)
+ self.target = self.split_sum(self.target) + self.target
+
+ def date(self, scheme):
+ self.target = md5_text(self.target)
+ d = time.localtime(self.timestamp)
+ strings = {
+ 'y': compat_str(d.tm_year),
+ 'm': '%02d' % d.tm_mon,
+ 'd': '%02d' % d.tm_mday,
+ }
+ self.target += ''.join(map(lambda c: strings[c], list(scheme)))
+
+ def split_time_even_odd(self):
+ even, odd = self.even_odd()
+ self.target = odd + md5_text(self.target) + even
+
+ def split_time_odd_even(self):
+ even, odd = self.even_odd()
+ self.target = even + md5_text(self.target) + odd
+
+ def split_ip_time_sum(self):
+ chunks, ip = self.preprocess(32)
+ self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)
+
+ def split_time_ip_sum(self):
+ chunks, ip = self.preprocess(32)
+ self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
+
+
+class IqiyiSDKInterpreter(object):
+ BASE62_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+
+ def __init__(self, sdk_code):
+ self.sdk_code = sdk_code
+
+ @classmethod
+ def base62(cls, num):
+ if num == 0:
+ return '0'
+ ret = ''
+ while num:
+ ret = cls.BASE62_TABLE[num % 62] + ret
+ num = num // 62
+ return ret
+
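+    # The SDK code ships packed with the common JS eval(function(p,a,c,k,e,d){...})
+    # obfuscator: identifiers in the payload are base62-encoded indices into a
+    # '|'-separated symbol table, which decode_eval_codes() rebuilds below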
+ def decode_eval_codes(self):
+ self.sdk_code = self.sdk_code[5:-3]
+
+ mobj = re.search(
+ r"'([^']+)',62,(\d+),'([^']+)'\.split\('\|'\),[^,]+,{}",
+ self.sdk_code)
+        obfuscated_code, count, symbols = mobj.groups()
+ count = int(count)
+ symbols = symbols.split('|')
+ symbol_table = {}
+
+ while count:
+ count -= 1
+ b62count = self.base62(count)
+ symbol_table[b62count] = symbols[count] or b62count
+
+ self.sdk_code = re.sub(
+ r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
+            obfuscated_code)
+
+ def run(self, target, ip, timestamp):
+ self.decode_eval_codes()
+
+ functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)
+
+ sdk = IqiyiSDK(target, ip, timestamp)
+
+ other_functions = {
+ 'handleSum': sdk.handleSum,
+ 'handleInput8': sdk.handle_input8,
+ 'handleInput16': sdk.handle_input16,
+ 'splitTimeEvenOdd': sdk.split_time_even_odd,
+ 'splitTimeOddEven': sdk.split_time_odd_even,
+ 'splitIpTimeSum': sdk.split_ip_time_sum,
+ 'splitTimeIpSum': sdk.split_time_ip_sum,
+ }
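+ # Function names such as 'mod1000', 'dateymd' or 'split8' encode their
+ # own argument and are dispatched by regexp below; anything else must be
+ # one of the fixed handlers above.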
+ for function in functions:
+ if re.match(r'mod\d+', function):
+ sdk.mod(int(function[3:]))
+ elif re.match(r'date[ymd]{3}', function):
+ sdk.date(function[4:])
+ elif re.match(r'split\d+', function):
+ sdk.split(int(function[5:]))
+ elif function in other_functions:
+ other_functions[function]()
+ else:
+ raise ExtractorError('Unknown function %s' % function)
+
+ return sdk.target
class IqiyiIE(InfoExtractor):
IE_NAME = 'iqiyi'
IE_DESC = '爱奇艺'
- _VALID_URL = r'http://(?:www\.)iqiyi.com/v_.+?\.html'
+ _VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html'
+
+ _NETRC_MACHINE = 'iqiyi'
_TESTS = [{
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
'params': {
'skip_download': True,
},
+ }, {
+ 'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://yule.iqiyi.com/pcb.html',
+ 'only_matching': True,
+ }, {
+ # VIP-only video. The first 2 parts (6 minutes) are available without login
+ # MD5 sums omitted as values are different on Travis CI and my machine
+ 'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
+ 'info_dict': {
+ 'id': 'f3cf468b39dddb30d676f89a91200dc1',
+ 'title': '泰坦尼克号',
+ },
+ 'playlist': [{
+ 'info_dict': {
+ 'id': 'f3cf468b39dddb30d676f89a91200dc1_part1',
+ 'ext': 'f4v',
+ 'title': '泰坦尼克号',
+ },
+ }, {
+ 'info_dict': {
+ 'id': 'f3cf468b39dddb30d676f89a91200dc1_part2',
+ 'ext': 'f4v',
+ 'title': '泰坦尼克号',
+ },
+ }],
+ 'expected_warnings': ['Needs a VIP account for full video'],
+ }, {
+ 'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
+ 'info_dict': {
+ 'id': '202918101',
+ 'title': '灌篮高手 国语版',
+ },
+ 'playlist_count': 101,
}]
_FORMATS_MAP = [
('10', 'h1'),
]
+ def _real_initialize(self):
+ self._login()
+
@staticmethod
- def md5_text(text):
- return hashlib.md5(text.encode('utf-8')).hexdigest()
+ def _rsa_fun(data):
+ # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
+ N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
+ e = 65537
+
+ return ohdave_rsa_encrypt(data, e, N)
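+ # ohdave_rsa_encrypt() applies raw RSA (pow(m, e, N) with the password
+ # bytes read as an integer) and returns the result as a hex string,
+ # mirroring the encryption done by the site's login JavaScript.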
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+
+ # No authentication to be performed
+ if not username:
+ return True
+
+ data = self._download_json(
+ 'http://kylin.iqiyi.com/get_token', None,
+ note='Getting token for login', errnote='Unable to get token for login')
+ sdk = data['sdk']
+ timestamp = int(time.time())
+ target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
+ username, self._rsa_fun(password.encode('utf-8')))
+
+ interp = IqiyiSDKInterpreter(sdk)
+ sign = interp.run(target, data['ip'], timestamp)
+
+ validation_params = {
+ 'target': target,
+ 'server': 'BEA3AA1908656AABCCFF76582C4C6660',
+ 'token': data['token'],
+ 'bird_src': 'f8d91d57af224da7893dd397d52d811a',
+ 'sign': sign,
+ 'bird_t': timestamp,
+ }
+ validation_result = self._download_json(
+ 'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None,
+ note='Validate credentials', errnote='Unable to validate credentials')
+
+ MSG_MAP = {
+ 'P00107': 'please log in via the web interface and enter the CAPTCHA code',
+ 'P00117': 'bad username or password',
+ }
+
+ code = validation_result['code']
+ if code != 'A00000':
+ msg = MSG_MAP.get(code)
+ if not msg:
+ msg = 'error %s' % code
+ if validation_result.get('msg'):
+ msg += ': ' + validation_result['msg']
+ self._downloader.report_warning('unable to log in: ' + msg)
+ return False
+
+ return True
+
+ def _authenticate_vip_video(self, api_video_url, video_id, tvid, _uuid, do_report_warning):
+ auth_params = {
+ # version and platform hard-coded in com/qiyi/player/core/model/remote/AuthenticationRemote.as
+ 'version': '2.0',
+ 'platform': 'b6c13e26323c537d',
+ 'aid': tvid,
+ 'tvid': tvid,
+ 'uid': '',
+ 'deviceId': _uuid,
+ 'playType': 'main', # XXX: always main?
+ 'filename': os.path.splitext(url_basename(api_video_url))[0],
+ }
- def construct_video_urls(self, data, video_id, _uuid):
+ qd_items = compat_parse_qs(compat_urllib_parse_urlparse(api_video_url).query)
+ for key, val in qd_items.items():
+ auth_params[key] = val[0]
+
+ auth_req = sanitized_Request(
+ 'http://api.vip.iqiyi.com/services/ckn.action',
+ urlencode_postdata(auth_params))
+ # iQiyi server throws HTTP 405 error without the following header
+ auth_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ auth_result = self._download_json(
+ auth_req, video_id,
+ note='Downloading video authentication JSON',
+ errnote='Unable to download video authentication JSON')
+ if auth_result['code'] == 'Q00506': # requires a VIP account
+ if do_report_warning:
+ self.report_warning('Needs a VIP account for full video')
+ return False
+
+ return auth_result
+
+ def construct_video_urls(self, data, video_id, _uuid, tvid):
def do_xor(x, y):
a = y % 3
if a == 1:
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
- return self.md5_text(t + mg + x)
+ return md5_text(t + mg + x)
video_urls_dict = {}
+ need_vip_warning_report = True
for format_item in data['vp']['tkl'][0]['vs']:
if 0 < int(format_item['bid']) <= 10:
format_id = self.get_format(format_item['bid'])
vl = segment['l']
if not vl.startswith('/'):
vl = get_encode_code(vl)
- key = get_path_key(
- vl.split('/')[-1].split('.')[0], format_id, segment_index)
+ is_vip_video = '/vip/' in vl
filesize = segment['b']
base_url = data['vp']['du'].split('/')
- base_url.insert(-1, key)
+ if not is_vip_video:
+ key = get_path_key(
+ vl.split('/')[-1].split('.')[0], format_id, segment_index)
+ base_url.insert(-1, key)
base_url = '/'.join(base_url)
param = {
'su': _uuid,
'ct': '',
'tn': str(int(time.time()))
}
- api_video_url = base_url + vl + '?' + \
- compat_urllib_parse.urlencode(param)
+ api_video_url = base_url + vl
+ if is_vip_video:
+ api_video_url = api_video_url.replace('.f4v', '.hml')
+ auth_result = self._authenticate_vip_video(
+ api_video_url, video_id, tvid, _uuid, need_vip_warning_report)
+ if auth_result is False:
+ need_vip_warning_report = False
+ break
+ param.update({
+ 't': auth_result['data']['t'],
+ # cid is hard-coded in com/qiyi/player/core/player/RuntimeData.as
+ 'cid': 'afbe8fd3d73448c9',
+ 'vid': video_id,
+ 'QY00001': auth_result['data']['u'],
+ })
+ api_video_url += '?' if '?' not in api_video_url else '&'
+ api_video_url += compat_urllib_parse.urlencode(param)
js = self._download_json(
api_video_url, video_id,
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
tail = tm + tvid
param = {
'key': 'fvip',
- 'src': self.md5_text('youtube-dl'),
+ 'src': md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
- 'enc': self.md5_text(enc_key + tail),
+ 'enc': md5_text(enc_key + tail),
'qyid': _uuid,
'tn': random.random(),
'um': 0,
- 'authkey': self.md5_text(self.md5_text('') + tail),
+ 'authkey': md5_text(md5_text('') + tail),
+ 'k_tag': 1,
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
def get_enc_key(self, swf_url, video_id):
# TODO: automatic key extraction
- # last update at 2015-10-22 for Zombie::bite
- # '7223c67061dbea1259d0ceb44f44b6d62288f4f80c972170de5201d2321060270e05'[2:66][0::2]
- enc_key = '2c76de15dcb44bd28ff0927d50d31620'
+ # last update at 2016-01-22 for Zombie::bite
+ enc_key = '6ab6d0280511493ba85594779759d4ed'
return enc_key
+ def _extract_playlist(self, webpage):
+ PAGE_SIZE = 50
+
+ links = re.findall(
+ r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
+ webpage)
+ if not links:
+ return
+
+ album_id = self._search_regex(
+ r'albumId\s*:\s*(\d+),', webpage, 'album ID')
+ album_title = self._search_regex(
+ r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)
+
+ entries = list(map(self.url_result, links))
+
+ # Start from page 2, since the links on the first page are already in webpage
+ for page_num in itertools.count(2):
+ pagelist_page = self._download_webpage(
+ 'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
+ album_id,
+ note='Download playlist page %d' % page_num,
+ errnote='Failed to download playlist page %d' % page_num)
+ pagelist = self._parse_json(
+ remove_start(pagelist_page, 'var tvInfoJs='), album_id)
+ vlist = pagelist['data']['vlist']
+ for item in vlist:
+ entries.append(self.url_result(item['vurl']))
+ if len(vlist) < PAGE_SIZE:
+ break
+
+ return self.playlist_result(entries, album_id, album_title)
+
def _real_extract(self, url):
webpage = self._download_webpage(
url, 'temp_id', note='download video page')
+
+ # There's no simple way to determine from the URL whether it points to a
+ # playlist or a single video, so probe the page for playlist markup first
+ playlist_result = self._extract_playlist(webpage)
+ if playlist_result:
+ return playlist_result
+
tvid = self._search_regex(
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
video_id = self._search_regex(
if raw_data['code'] != 'A000000':
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
- if not raw_data['data']['vp']['tkl']:
- raise ExtractorError('No support iQiqy VIP video')
-
data = raw_data['data']
title = data['vi']['vn']
# generate video_urls_dict
video_urls_dict = self.construct_video_urls(
- data, video_id, _uuid)
+ data, video_id, _uuid, tvid)
# construct info
entries = []
from .common import InfoExtractor
from ..utils import (
ExtractorError,
+ int_or_none,
sanitized_Request,
)
'title': 'Иван Васильевич меняет профессию',
'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
'duration': 5498,
- 'thumbnail': 'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
+ 'thumbnail': 're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
},
- # Serial's serie
+ # Serial's series
{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
'md5': '221f56b35e3ed815fde2df71032f4b3e',
'info_dict': {
'id': '9549',
'ext': 'mp4',
- 'title': 'Двое из ларца - Серия 1',
+ 'title': 'Двое из ларца - Дело Гольдберга (1 часть)',
+ 'series': 'Двое из ларца',
+ 'season': 'Сезон 1',
+ 'season_number': 1,
+ 'episode': 'Дело Гольдберга (1 часть)',
+ 'episode_number': 1,
'duration': 2655,
- 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
+ 'thumbnail': 're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
}
]
# Sorted by quality
- _known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
-
- # Sorted by size
- _known_thumbnails = ['Thumb-120x90', 'Thumb-160', 'Thumb-640x480']
-
- def _extract_description(self, html):
- m = re.search(r'<meta name="description" content="(?P<description>[^"]+)"/>', html)
- return m.group('description') if m is not None else None
-
- def _extract_comment_count(self, html):
- m = re.search('(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
- return int(m.group('commentcount')) if m is not None else 0
+ _KNOWN_FORMATS = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']
def _real_extract(self, url):
video_id = self._match_id(url)
- api_url = 'http://api.digitalaccess.ru/api/json/'
-
data = {
'method': 'da.content.get',
'params': [
]
}
- request = sanitized_Request(api_url, json.dumps(data))
-
- video_json_page = self._download_webpage(
+ request = sanitized_Request(
+ 'http://api.digitalaccess.ru/api/json/', json.dumps(data))
+ video_json = self._download_json(
request, video_id, 'Downloading video JSON')
- video_json = json.loads(video_json_page)
if 'error' in video_json:
error = video_json['error']
formats = [{
'url': x['url'],
'format_id': x['content_format'],
- 'preference': self._known_formats.index(x['content_format']),
- } for x in result['files'] if x['content_format'] in self._known_formats]
+ 'preference': self._KNOWN_FORMATS.index(x['content_format']),
+ } for x in result['files'] if x['content_format'] in self._KNOWN_FORMATS]
self._sort_formats(formats)
- if not formats:
- raise ExtractorError('No media links available for %s' % video_id)
-
- duration = result['duration']
- compilation = result['compilation']
title = result['title']
+ duration = int_or_none(result.get('duration'))
+ compilation = result.get('compilation')
+ episode = title if compilation else None
+
title = '%s - %s' % (compilation, title) if compilation is not None else title
- previews = result['preview']
- previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
- thumbnail = previews[-1]['url'] if len(previews) > 0 else None
+ thumbnails = [{
+ 'url': preview['url'],
+ 'id': preview.get('content_format'),
+ } for preview in result.get('preview', []) if preview.get('url')]
+
+ webpage = self._download_webpage(url, video_id)
+
+ season = self._search_regex(
+ r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)',
+ webpage, 'season', default=None)
+ season_number = int_or_none(self._search_regex(
+ r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"',
+ webpage, 'season number', default=None))
+
+ episode_number = int_or_none(self._search_regex(
+ r'<meta[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)',
+ webpage, 'episode number', default=None))
- video_page = self._download_webpage(url, video_id, 'Downloading video page')
- description = self._extract_description(video_page)
- comment_count = self._extract_comment_count(video_page)
+ description = self._og_search_description(webpage, default=None) or self._html_search_meta(
+ 'description', webpage, 'description', default=None)
return {
'id': video_id,
'title': title,
- 'thumbnail': thumbnail,
+ 'series': compilation,
+ 'season': season,
+ 'season_number': season_number,
+ 'episode': episode,
+ 'episode_number': episode_number,
+ 'thumbnails': thumbnails,
'description': description,
'duration': duration,
- 'comment_count': comment_count,
'formats': formats,
}
}]
def _extract_entries(self, html, compilation_id):
- return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
- for serie in re.findall(r'<strong><a href="/watch/%s/(\d+)">(?:[^<]+)</a></strong>' % compilation_id, html)]
+ return [
+ self.url_result(
+ 'http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), IviIE.ie_key())
+ for serie in re.findall(
+ r'<a href="/watch/%s/(\d+)"[^>]+data-id="\1"' % compilation_id, html)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
season_id = mobj.group('seasonid')
if season_id is not None: # Season link
- season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
+ season_page = self._download_webpage(
+ url, compilation_id, 'Downloading season %s web page' % season_id)
playlist_id = '%s/season%s' % (compilation_id, season_id)
playlist_title = self._html_search_meta('title', season_page, 'title')
entries = self._extract_entries(season_page, compilation_id)
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
playlist_id = compilation_id
playlist_title = self._html_search_meta('title', compilation_page, 'title')
- seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
- if len(seasons) == 0: # No seasons in this compilation
+ seasons = re.findall(
+ r'<a href="/watch/%s/season(\d+)' % compilation_id, compilation_page)
+ if not seasons: # No seasons in this compilation
entries = self._extract_entries(compilation_page, compilation_id)
else:
entries = []
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urlparse,
+)
+from ..utils import qualities
+
+
+class IvideonIE(InfoExtractor):
+ IE_NAME = 'ivideon'
+ IE_DESC = 'Ivideon TV'
+ _VALID_URL = r'https?://(?:www\.)?ivideon\.com/tv/(?:[^/]+/)*camera/(?P<id>\d+-[\da-f]+)/(?P<camera_id>\d+)'
+ _TESTS = [{
+ 'url': 'https://www.ivideon.com/tv/camera/100-916ca13b5c4ad9f564266424a026386d/0/',
+ 'info_dict': {
+ 'id': '100-916ca13b5c4ad9f564266424a026386d',
+ 'ext': 'flv',
+ 'title': 're:^Касса [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'description': 'Основное предназначение - запись действий кассиров. Плюс общий вид.',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'https://www.ivideon.com/tv/camera/100-c4ee4cb9ede885cf62dfbe93d7b53783/589824/?lang=ru',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.ivideon.com/tv/map/22.917923/-31.816406/16/camera/100-e7bc16c7d4b5bbd633fd5350b66dfa9a/0',
+ 'only_matching': True,
+ }]
+
+ _QUALITIES = ('low', 'mid', 'hi')
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ server_id, camera_id = mobj.group('id'), mobj.group('camera_id')
+ camera_name, description = None, None
+ camera_url = compat_urlparse.urljoin(
+ url, '/tv/camera/%s/%s/' % (server_id, camera_id))
+
+ webpage = self._download_webpage(camera_url, server_id, fatal=False)
+ if webpage:
+ config_string = self._search_regex(
+ r'var\s+config\s*=\s*({.+?});', webpage, 'config', default=None)
+ if config_string:
+ config = self._parse_json(config_string, server_id, fatal=False) or {}
+ camera_info = config.get('ivTvAppOptions', {}).get('currentCameraInfo')
+ if camera_info:
+ camera_name = camera_info.get('camera_name')
+ description = camera_info.get('misc', {}).get('description')
+ if not camera_name:
+ camera_name = self._html_search_meta(
+ 'name', webpage, 'camera name', default=None) or self._search_regex(
+ r'<h1[^>]+class="b-video-title"[^>]*>([^<]+)', webpage, 'camera name', default=None)
+
+ quality = qualities(self._QUALITIES)
+
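+ # The public TV player appears to use the fixed session id 'demo' for
+ # publicly shared cameras; the same value is sent for every quality.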
+ formats = [{
+ 'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse.urlencode({
+ 'server': server_id,
+ 'camera': camera_id,
+ 'sessionId': 'demo',
+ 'q': quality(format_id),
+ }),
+ 'format_id': format_id,
+ 'ext': 'flv',
+ 'quality': quality(format_id),
+ } for format_id in self._QUALITIES]
+ self._sort_formats(formats)
+
+ return {
+ 'id': server_id,
+ 'title': self._live_title(camera_name or server_id),
+ 'description': description,
+ 'is_live': True,
+ 'formats': formats,
+ }
+++ /dev/null
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- RegexNotFoundError,
- unescapeHTML,
-)
-
-
-class JukeboxIE(InfoExtractor):
- _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
- _TEST = {
- 'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
- 'info_dict': {
- 'id': 'r303r',
- 'ext': 'flv',
- 'title': 'Kosheen-En Vivo Pride',
- 'uploader': 'Kosheen',
- },
- }
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
-
- html = self._download_webpage(url, video_id)
- iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
-
- iframe_html = self._download_webpage(iframe_url, video_id, 'Downloading iframe')
- if re.search(r'class="jkb_waiting"', iframe_html) is not None:
- raise ExtractorError('Video is not available(in your country?)!')
-
- self.report_extraction(video_id)
-
- try:
- video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"',
- iframe_html, 'video url')
- video_url = unescapeHTML(video_url).replace('\/', '/')
- except RegexNotFoundError:
- youtube_url = self._search_regex(
- r'config":{"file":"(http:\\/\\/www\.youtube\.com\\/watch\?v=[^"]+)"',
- iframe_html, 'youtube url')
- youtube_url = unescapeHTML(youtube_url).replace('\/', '/')
- self.to_screen('Youtube video detected')
- return self.url_result(youtube_url, ie='Youtube')
-
- title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>',
- html, 'title')
- artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>',
- html, 'artist')
-
- return {
- 'id': video_id,
- 'url': video_url,
- 'title': artist + '-' + title,
- 'uploader': artist,
- }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class JWPlatformIE(InfoExtractor):
+ _VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
+ _TEST = {
+ 'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
+ 'md5': 'fa8899fa601eb7c83a64e9d568bdf325',
+ 'info_dict': {
+ 'id': 'nPripu9l',
+ 'ext': 'mov',
+ 'title': 'Big Buck Bunny Trailer',
+ 'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.',
+ 'upload_date': '20081127',
+ 'timestamp': 1227796140,
+ }
+ }
+
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<script[^>]+?src=["\'](?P<url>(?:https?:)?//content\.jwplatform\.com/players/[a-zA-Z0-9]{8})',
+ webpage)
+ if mobj:
+ return mobj.group('url')
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ json_data = self._download_json('http://content.jwplatform.com/feeds/%s.json' % video_id, video_id)
+ video_data = json_data['playlist'][0]
+ subtitles = {}
+ for track in video_data['tracks']:
+ if track['kind'] == 'captions':
+ subtitles[track['label']] = [{'url': self._proto_relative_url(track['file'])}]
+
+ formats = []
+ for source in video_data['sources']:
+ source_url = self._proto_relative_url(source['file'])
+ source_type = source.get('type') or ''
+ if source_type == 'application/vnd.apple.mpegurl':
+ formats.extend(self._extract_m3u8_formats(
+ source_url, video_id, 'mp4', 'm3u8_native', fatal=False))
+ elif source_type.startswith('audio'):
+ formats.append({
+ 'url': source_url,
+ 'vcodec': 'none',
+ })
+ else:
+ formats.append({
+ 'url': source_url,
+ 'width': int_or_none(source.get('width')),
+ 'height': int_or_none(source.get('height')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': video_data['title'],
+ 'description': video_data.get('description'),
+ 'thumbnail': self._proto_relative_url(video_data.get('image')),
+ 'timestamp': int_or_none(video_data.get('pubdate')),
+ 'subtitles': subtitles,
+ 'formats': formats,
+ }
'info_dict': {
'id': '1_1jc2y3e4',
'ext': 'mp4',
- 'title': 'Track 4',
+ 'title': 'Straight from the Heart',
'upload_date': '20131219',
'uploader_id': 'mlundberg@wolfgangsvault.com',
'description': 'The Allman Brothers Band, 12/16/1981',
'version': '-1',
},
{
- 'action': 'getContextData',
- 'contextDataParams:objectType': 'KalturaEntryContextDataParams',
- 'contextDataParams:referrer': 'http://www.kaltura.com/',
- 'contextDataParams:streamerType': 'http',
+ 'action': 'getbyentryid',
'entryId': video_id,
- 'service': 'baseentry',
+ 'service': 'flavorAsset',
},
]
return self._kaltura_api_call(
partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5')
entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5')
- info, source_data = self._get_video_info(entry_id, partner_id)
+ info, flavor_assets = self._get_video_info(entry_id, partner_id)
source_url = smuggled_data.get('source_url')
if source_url:
referrer = None
formats = []
- for f in source_data['flavorAssets']:
+ for f in flavor_assets:
+ # Skip assets that are not ready (status != 2)
+ if f['status'] != 2:
+ continue
video_url = '%s/flavorId/%s' % (info['dataUrl'], f['id'])
if referrer:
video_url += '?referrer=%s' % referrer
'width': int_or_none(f.get('width')),
'url': video_url,
})
+ m3u8_url = info['dataUrl'].replace('format/url', 'format/applehttp')
+ if referrer:
+ m3u8_url += '?referrer=%s' % referrer
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, entry_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+
self._check_formats(formats, entry_id)
self._sort_formats(formats)
subs = self._download_json(
'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id),
video_id, 'Downloading subtitles JSON', fatal=False)
- return {'se': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}
+ return {'sv': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
- gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
+ gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
gcid = gcids[-1]
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
from __future__ import unicode_literals
-import os
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlparse
-from ..utils import sanitized_Request
+from ..utils import (
+ sanitized_Request,
+ url_basename,
+)
class KeezMoviesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)'
_TEST = {
'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
- 'md5': '6e297b7e789329923fcf83abb67c9289',
+ 'md5': '1c1e75d22ffa53320f45eeb07bc4cdc0',
'info_dict': {
'id': '1214711',
'ext': 'mp4',
'title': 'Petite Asian Lady Mai Playing In Bathtub',
'age_limit': 18,
+ 'thumbnail': 're:^https?://.*\.jpg$',
}
}
video_title = self._html_search_regex(
r'<h1 [^>]*>([^<]+)', webpage, 'title')
- video_url = self._html_search_regex(
- r'(?s)html5VideoPlayer = .*?src="([^"]+)"', webpage, 'video URL')
- path = compat_urllib_parse_urlparse(video_url).path
- extension = os.path.splitext(path)[1][1:]
- format = path.split('/')[4].split('_')[:2]
- format = "-".join(format)
+ flashvars = self._parse_json(self._search_regex(
+ r'var\s+flashvars\s*=\s*([^;]+);', webpage, 'flashvars'), video_id)
+
+ formats = []
+ for height in (180, 240, 480):
+ if flashvars.get('quality_%dp' % height):
+ video_url = flashvars['quality_%dp' % height]
+ a_format = {
+ 'url': video_url,
+ 'height': height,
+ 'format_id': '%dp' % height,
+ }
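+ # Video filenames appear to embed the bitrate as their second
+ # underscore-separated part (e.g. '..._720k_...'); use it as tbr.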
+ filename_parts = url_basename(video_url).split('_')
+ if len(filename_parts) >= 2 and re.match(r'\d+[Kk]', filename_parts[1]):
+ a_format['tbr'] = int(filename_parts[1][:-1])
+ formats.append(a_format)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'title': video_title,
- 'url': video_url,
- 'ext': extension,
- 'format': format,
- 'format_id': format,
+ 'formats': formats,
'age_limit': age_limit,
+ 'thumbnail': flashvars.get('image_url')
}
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import smuggle_url
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
- 'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
+ 'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant/description',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
- }
+ },
+ 'add_ie': ['Vimeo'],
}, {
'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
'info_dict': {
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
- r'<title>\s*(.*?)(?:\s*— Kickstarter)?\s*</title>',
+ r'<title>\s*(.*?)(?:\s*—\s*Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
- 'url': url,
+ 'url': smuggle_url(url, {'to_generic': True}),
'title': title,
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ float_or_none,
+ int_or_none,
+)
+
+
+class KonserthusetPlayIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?konserthusetplay\.se/\?.*\bm=(?P<id>[^&]+)'
+ _TEST = {
+ 'url': 'http://www.konserthusetplay.se/?m=CKDDnlCY-dhWAAqiMERd-A',
+ 'info_dict': {
+ 'id': 'CKDDnlCY-dhWAAqiMERd-A',
+ 'ext': 'flv',
+ 'title': 'Orkesterns instrument: Valthornen',
+ 'description': 'md5:f10e1f0030202020396a4d712d2fa827',
+ 'thumbnail': 're:^https?://.*$',
+ 'duration': 398.8,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ e = self._search_regex(
+ r'https?://csp\.picsearch\.com/rest\?.*\be=(.+?)[&"\']', webpage, 'e')
+
+ rest = self._download_json(
+ 'http://csp.picsearch.com/rest?e=%s&containerId=mediaplayer&i=object' % e,
+ video_id, transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])
+
+ media = rest['media']
+ player_config = media['playerconfig']
+ playlist = player_config['playlist']
+
+ source = next(f for f in playlist if f.get('bitrates'))
+
+ FORMAT_ID_REGEX = r'_([^_]+)_h264m\.mp4'
+
+ formats = []
+
+ fallback_url = source.get('fallbackUrl')
+ fallback_format_id = None
+ if fallback_url:
+ fallback_format_id = self._search_regex(
+ FORMAT_ID_REGEX, fallback_url, 'format id', default=None)
+
+ connection_url = (player_config.get('rtmp', {}).get(
+ 'netConnectionUrl') or player_config.get(
+ 'plugins', {}).get('bwcheck', {}).get('netConnectionUrl'))
+ if connection_url:
+ for f in source['bitrates']:
+ video_url = f.get('url')
+ if not video_url:
+ continue
+ format_id = self._search_regex(
+ FORMAT_ID_REGEX, video_url, 'format id', default=None)
+ f_common = {
+ 'vbr': int_or_none(f.get('bitrate')),
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ }
+ f = f_common.copy()
+ f.update({
+ 'url': connection_url,
+ 'play_path': video_url,
+ 'format_id': 'rtmp-%s' % format_id if format_id else 'rtmp',
+ 'ext': 'flv',
+ })
+ formats.append(f)
+ if format_id and format_id == fallback_format_id:
+ f = f_common.copy()
+ f.update({
+ 'url': fallback_url,
+ 'format_id': 'http-%s' % format_id if format_id else 'http',
+ })
+ formats.append(f)
+
+ if not formats and fallback_url:
+ formats.append({
+ 'url': fallback_url,
+ })
+
+ self._sort_formats(formats)
+
+ title = player_config.get('title') or media['title']
+ description = player_config.get('mediaInfo', {}).get('description')
+ thumbnail = media.get('image')
+ duration = float_or_none(media.get('duration'), 1000)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ }
(file_format['ext'], file_format.get('br', ''), song_id),
song_id, note='Download %s url info' % file_format['format'],
)
+
+ if song_url == 'IPDeny':
+ raise ExtractorError('This song is blocked in this region', expected=True)
+
if song_url.startswith('http://') or song_url.startswith('https://'):
formats.append({
'url': song_url,
-# -*- coding: utf-8 -*-
+# coding: utf-8
from __future__ import unicode_literals
-import random
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urlparse,
+)
from ..utils import (
ExtractorError,
+ sanitized_Request,
+ unified_strdate,
+ urlencode_postdata,
+ xpath_element,
xpath_text,
)
class Laola1TvIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/[^/]+/(?P<slug>[^/?#&]+)'
+ _TESTS = [{
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
'info_dict': {
'id': '227883',
- 'ext': 'mp4',
+ 'display_id': 'straubing-tigers-koelner-haie',
+ 'ext': 'flv',
'title': 'Straubing Tigers - Kölner Haie',
+ 'upload_date': '20140912',
+ 'is_live': False,
'categories': ['Eishockey'],
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie',
+ 'info_dict': {
+ 'id': '464602',
+ 'display_id': 'straubing-tigers-koelner-haie',
+ 'ext': 'flv',
+ 'title': 'Straubing Tigers - Kölner Haie',
+ 'upload_date': '20160129',
'is_live': False,
+ 'categories': ['Eishockey'],
},
'params': {
'skip_download': True,
}
- }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ display_id = mobj.group('slug')
lang = mobj.group('lang')
portal = mobj.group('portal')
- webpage = self._download_webpage(url, video_id)
+ webpage = self._download_webpage(url, display_id)
+
iframe_url = self._search_regex(
- r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
- webpage, 'iframe URL')
+ r'<iframe[^>]*?id="videoplayer"[^>]*?src="([^"]+)"',
+ webpage, 'iframe url')
+
+ video_id = self._search_regex(
+ r'videoid=(\d+)', iframe_url, 'video id')
- iframe = self._download_webpage(
- iframe_url, video_id, note='Downloading iframe')
- flashvars_m = re.findall(
- r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
- flashvars = dict((m[0], m[1]) for m in flashvars_m)
+ iframe = self._download_webpage(compat_urlparse.urljoin(
+ url, iframe_url), display_id, 'Downloading iframe')
partner_id = self._search_regex(
- r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner id')
-
- xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
- 'play=%s&partner=%s&portal=%s&v5ident=&lang=%s' % (
- video_id, partner_id, portal, lang))
- hd_doc = self._download_xml(xml_url, video_id)
-
- title = xpath_text(hd_doc, './/video/title', fatal=True)
- flash_url = xpath_text(hd_doc, './/video/url', fatal=True)
- uploader = xpath_text(hd_doc, './/video/meta_organistation')
- is_live = xpath_text(hd_doc, './/video/islive') == 'true'
-
- categories = xpath_text(hd_doc, './/video/meta_sports')
- if categories:
- categories = categories.split(',')
-
- ident = random.randint(10000000, 99999999)
- token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
- flash_url, ident, flashvars['timestamp'], flashvars['auth'])
-
- token_doc = self._download_xml(
- token_url, video_id, note='Downloading token')
- token_attrib = token_doc.find('.//token').attrib
- if token_attrib.get('auth') in ('blocked', 'restricted'):
+ r'partnerid\s*:\s*(["\'])(?P<partner_id>.+?)\1',
+ iframe, 'partner id', group='partner_id')
+
+ hd_doc = self._download_xml(
+ 'http://www.laola1.tv/server/hd_video.php?%s'
+ % compat_urllib_parse.urlencode({
+ 'play': video_id,
+ 'partner': partner_id,
+ 'portal': portal,
+ 'lang': lang,
+ 'v5ident': '',
+ }), display_id)
+
+ _v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k)
+ title = _v('title', fatal=True)
+
+ req = sanitized_Request(
+ 'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' %
+ compat_urllib_parse.urlencode({
+ 'videoId': video_id,
+ 'target': '2',
+ 'label': 'laola1tv',
+ 'area': _v('area'),
+ }),
+ urlencode_postdata(
+ dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(',')))))
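+ # The POST body enumerates the comma-separated 'req_liga_abos' values as
+ # numbered form fields (0=<first>, 1=<second>, ...), which is apparently
+ # the shape the stream-access endpoint expects.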
+
+ token_url = self._download_json(req, display_id)['data']['stream-access'][0]
+ token_doc = self._download_xml(token_url, display_id, 'Downloading token')
+
+ token_attrib = xpath_element(token_doc, './/token').attrib
+ token_auth = token_attrib['auth']
+
+ if token_auth in ('blocked', 'restricted', 'error'):
raise ExtractorError(
- 'Token error: %s' % token_attrib.get('comment'), expected=True)
+ 'Token error: %s' % token_attrib['comment'], expected=True)
+
+ formats = self._extract_f4m_formats(
+ '%s?hdnea=%s&hdcore=3.2.0' % (token_attrib['url'], token_auth),
+ video_id, f4m_id='hds')
- video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
- token_attrib['url'], token_attrib['auth'])
+ categories_str = _v('meta_sports')
+ categories = categories_str.split(',') if categories_str else []
return {
'id': video_id,
- 'is_live': is_live,
+ 'display_id': display_id,
'title': title,
- 'url': video_url,
- 'uploader': uploader,
+ 'upload_date': unified_strdate(_v('time_date')),
+ 'uploader': _v('meta_organisation'),
'categories': categories,
- 'ext': 'mp4',
+ 'is_live': _v('islive') == 'true',
+ 'formats': formats,
}
--- /dev/null
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class LemondeIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P<id>[^/]+)\.html'
+ _TESTS = [{
+ 'url': 'http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html',
+ 'md5': '01fb3c92de4c12c573343d63e163d302',
+ 'info_dict': {
+ 'id': 'lqm3kl',
+ 'ext': 'mp4',
+ 'title': "Comprendre l'affaire Bygmalion en 5 minutes",
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 320,
+ 'upload_date': '20160119',
+ 'timestamp': 1453194778,
+ 'uploader_id': '3pmkp',
+ },
+ }, {
+ 'url': 'http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ digiteka_url = self._proto_relative_url(self._search_regex(
+ r'url\s*:\s*(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1',
+ webpage, 'digiteka url', group='url'))
+ return self.url_result(digiteka_url, 'Digiteka')
import datetime
import re
import time
+import base64
+import hashlib
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_ord,
+ compat_str,
)
from ..utils import (
determine_ext,
parse_iso8601,
sanitized_Request,
int_or_none,
+ str_or_none,
encode_data_uri,
+ url_basename,
)
},
'playlist_mincount': 7
}]
+
+
+class LetvCloudIE(InfoExtractor):
+ IE_DESC = '乐视云'
+ _VALID_URL = r'https?://yuntv\.letv\.com/bcloud.html\?.+'
+
+ _TESTS = [{
+ 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=467623dedf',
+ 'md5': '26450599afd64c513bc77030ad15db44',
+ 'info_dict': {
+ 'id': 'p7jnfw5hw9_467623dedf',
+ 'ext': 'mp4',
+ 'title': 'Video p7jnfw5hw9_467623dedf',
+ },
+ }, {
+ 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=ec93197892&pu=2c7cd40209&auto_play=1&gpcflag=1&width=640&height=360',
+ 'md5': 'e03d9cc8d9c13191e1caf277e42dbd31',
+ 'info_dict': {
+ 'id': 'p7jnfw5hw9_ec93197892',
+ 'ext': 'mp4',
+ 'title': 'Video p7jnfw5hw9_ec93197892',
+ },
+ }, {
+ 'url': 'http://yuntv.letv.com/bcloud.html?uu=p7jnfw5hw9&vu=187060b6fd',
+ 'md5': 'cb988699a776b22d4a41b9d43acfb3ac',
+ 'info_dict': {
+ 'id': 'p7jnfw5hw9_187060b6fd',
+ 'ext': 'mp4',
+ 'title': 'Video p7jnfw5hw9_187060b6fd',
+ },
+ }]
+
+ @staticmethod
+ def sign_data(obj):
+ if obj['cf'] == 'flash':
+ salt = '2f9d6924b33a165a6d8b5d3d42f4f987'
+ items = ['cf', 'format', 'ran', 'uu', 'ver', 'vu']
+ elif obj['cf'] == 'html5':
+ salt = 'fbeh5player12c43eccf2bec3300344'
+ items = ['cf', 'ran', 'uu', 'bver', 'vu']
+ input_data = ''.join([item + obj[item] for item in items]) + salt
+ obj['sign'] = hashlib.md5(input_data.encode('utf-8')).hexdigest()
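+ # The signature is the md5 of 'key' + 'value' pairs concatenated in the
+ # fixed order above plus a per-player salt, e.g. for the flash player:
+ # md5('cfflash' + 'formatjson' + 'ran' + ran + 'uu' + uu + 'ver2.2' +
+ # 'vu' + vu + salt)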
+
+ def _get_formats(self, cf, uu, vu, media_id):
+ def get_play_json(cf, timestamp):
+ data = {
+ 'cf': cf,
+ 'ver': '2.2',
+ 'bver': 'firefox44.0',
+ 'format': 'json',
+ 'uu': uu,
+ 'vu': vu,
+ 'ran': compat_str(timestamp),
+ }
+ self.sign_data(data)
+ return self._download_json(
+ 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse.urlencode(data),
+ media_id, 'Downloading playJson data for type %s' % cf)
+
+ play_json = get_play_json(cf, time.time())
+ # The server time may be different from local time
+ if play_json.get('code') == 10071:
+ play_json = get_play_json(cf, play_json['timestamp'])
+
+ if not play_json.get('data'):
+ if play_json.get('message'):
+ raise ExtractorError('Letv cloud said: %s' % play_json['message'], expected=True)
+ elif play_json.get('code'):
+ raise ExtractorError('Letv cloud returned error %d' % play_json['code'], expected=True)
+ else:
+ raise ExtractorError('Letv cloud returned an unknown error')
+
+ def b64decode(s):
+ return base64.b64decode(s.encode('utf-8')).decode('utf-8')
+
+ formats = []
+ for media in play_json['data']['video_info']['media'].values():
+ play_url = media['play_url']
+ url = b64decode(play_url['main_url'])
+ decoded_url = b64decode(url_basename(url))
+ formats.append({
+ 'url': url,
+ 'ext': determine_ext(decoded_url),
+ 'format_id': int_or_none(play_url.get('vtype')),
+ 'format_note': str_or_none(play_url.get('definition')),
+ 'width': int_or_none(play_url.get('vwidth')),
+ 'height': int_or_none(play_url.get('vheight')),
+ })
+
+ return formats
+
+ def _real_extract(self, url):
+ uu_mobj = re.search(r'uu=([\w]+)', url)
+ vu_mobj = re.search(r'vu=([\w]+)', url)
+
+ if not uu_mobj or not vu_mobj:
+ raise ExtractorError('Invalid URL: %s' % url, expected=True)
+
+ uu = uu_mobj.group(1)
+ vu = vu_mobj.group(1)
+ media_id = uu + '_' + vu
+
+ formats = self._get_formats('flash', uu, vu, media_id) + self._get_formats('html5', uu, vu, media_id)
+ self._sort_formats(formats)
+
+ return {
+ 'id': media_id,
+ 'title': 'Video %s' % media_id,
+ 'formats': formats,
+ }
if not stream_url:
continue
if '.f4m' in stream_url:
- formats.extend(self._extract_f4m_formats(stream_url, video_id))
+ formats.extend(self._extract_f4m_formats(
+ stream_url, video_id, fatal=False))
else:
fmt = {
'url': stream_url,
format_id = mobile_url.get('targetMediaPlatform')
if determine_ext(media_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
- media_url, video_id, 'mp4', entry_protocol='m3u8_native',
- preference=-1, m3u8_id=format_id))
+ media_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id=format_id, fatal=False))
else:
formats.append({
'url': media_url,
'info_dict': {
'id': '801_1409392012',
'ext': 'mp4',
- 'description': "Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.",
+ 'description': 'Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.',
'uploader': 'bony333',
'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
}
from __future__ import unicode_literals
import re
-import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
- ExtractorError,
find_xpath_attr,
- int_or_none,
- orderedSet,
+ xpath_attr,
xpath_with_ns,
+ xpath_text,
+ orderedSet,
+ int_or_none,
+ float_or_none,
+ parse_iso8601,
+ determine_ext,
)
class LivestreamIE(InfoExtractor):
IE_NAME = 'livestream'
- _VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
+ _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?'
_TESTS = [{
'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
'md5': '53274c76ba7754fb0e8d072716f2292b',
'id': '4719370',
'ext': 'mp4',
'title': 'Live from Webster Hall NYC',
+ 'timestamp': 1350008072,
'upload_date': '20121012',
+ 'duration': 5968.0,
'like_count': int,
'view_count': int,
'thumbnail': 're:^http://.*\.jpg$'
'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
'only_matching': True,
}]
+ _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'
+
+ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
+ base_ele = find_xpath_attr(
+ smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
+ base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/'
- def _parse_smil(self, video_id, smil_url):
formats = []
- _SWITCH_XPATH = (
- './/{http://www.w3.org/2001/SMIL20/Language}body/'
- '{http://www.w3.org/2001/SMIL20/Language}switch')
- smil_doc = self._download_xml(
- smil_url, video_id,
- note='Downloading SMIL information',
- errnote='Unable to download SMIL information',
- fatal=False)
- if smil_doc is False: # Download failed
- return formats
- title_node = find_xpath_attr(
- smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
- 'name', 'title')
- if title_node is None:
- self.report_warning('Cannot find SMIL id')
- switch_node = smil_doc.find(_SWITCH_XPATH)
- else:
- title_id = title_node.attrib['content']
- switch_node = find_xpath_attr(
- smil_doc, _SWITCH_XPATH, 'id', title_id)
- if switch_node is None:
- raise ExtractorError('Cannot find switch node')
- video_nodes = switch_node.findall(
- '{http://www.w3.org/2001/SMIL20/Language}video')
+ video_nodes = smil.findall(self._xpath_ns('.//video', namespace))
for vn in video_nodes:
- tbr = int_or_none(vn.attrib.get('system-bitrate'))
+ tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000)
furl = (
- 'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
- (vn.attrib['src']))
+ '%s%s?v=3.0.3&fp=WIN%%2014,0,0,145' % (base, vn.attrib['src']))
if 'clipBegin' in vn.attrib:
furl += '&ssek=' + vn.attrib['clipBegin']
formats.append({
('sd', 'progressive_url'),
('hd', 'progressive_url_hd'),
)
- formats = [{
- 'format_id': format_id,
- 'url': video_data[key],
- 'quality': i + 1,
- } for i, (format_id, key) in enumerate(FORMAT_KEYS)
- if video_data.get(key)]
+
+ formats = []
+ for format_id, key in FORMAT_KEYS:
+ video_url = video_data.get(key)
+ if video_url:
+ ext = determine_ext(video_url)
+ if ext == 'm3u8':
+ continue
+ bitrate = int_or_none(self._search_regex(
+ r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None))
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ 'tbr': bitrate,
+ 'ext': ext,
+ })
smil_url = video_data.get('smil_url')
if smil_url:
- formats.extend(self._parse_smil(video_id, smil_url))
+ formats.extend(self._extract_smil_formats(smil_url, video_id))
+
+ m3u8_url = video_data.get('m3u8_url')
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+
+ f4m_url = video_data.get('f4m_url')
+ if f4m_url:
+ formats.extend(self._extract_f4m_formats(
+ f4m_url, video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
+ comments = [{
+ 'author_id': comment.get('author_id'),
+ 'author': comment.get('author', {}).get('full_name'),
+ 'id': comment.get('id'),
+ 'text': comment['text'],
+ 'timestamp': parse_iso8601(comment.get('created_at')),
+ } for comment in video_data.get('comments', {}).get('data', [])]
+
return {
'id': video_id,
'formats': formats,
'title': video_data['caption'],
+ 'description': video_data.get('description'),
'thumbnail': video_data.get('thumbnail_url'),
- 'upload_date': video_data['updated_at'].replace('-', '')[:8],
+ 'duration': float_or_none(video_data.get('duration'), 1000),
+ 'timestamp': parse_iso8601(video_data.get('publish_at')),
'like_count': video_data.get('likes', {}).get('total'),
+ 'comment_count': video_data.get('comments', {}).get('total'),
'view_count': video_data.get('views'),
+ 'comments': comments,
}
- def _extract_event(self, info):
- event_id = compat_str(info['id'])
- account = compat_str(info['owner_account_id'])
- root_url = (
- 'https://new.livestream.com/api/accounts/{account}/events/{event}/'
- 'feed.json'.format(account=account, event=event_id))
-
- def _extract_videos():
- last_video = None
- for i in itertools.count(1):
- if last_video is None:
- info_url = root_url
- else:
- info_url = '{root}?&id={id}&newer=-1&type=video'.format(
- root=root_url, id=last_video)
- videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
- videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
- if not videos_info:
- break
- for v in videos_info:
- yield self._extract_video_info(v)
- last_video = videos_info[-1]['id']
- return self.playlist_result(_extract_videos(), event_id, info['full_name'])
+ def _extract_stream_info(self, stream_info):
+ broadcast_id = stream_info['broadcast_id']
+ is_live = stream_info.get('is_live')
+
+ formats = []
+ smil_url = stream_info.get('play_url')
+ if smil_url:
+ formats.extend(self._extract_smil_formats(smil_url, broadcast_id))
+
+ entry_protocol = 'm3u8' if is_live else 'm3u8_native'
+ m3u8_url = stream_info.get('m3u8_url')
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, broadcast_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False))
+
+ rtsp_url = stream_info.get('rtsp_url')
+ if rtsp_url:
+ formats.append({
+ 'url': rtsp_url,
+ 'format_id': 'rtsp',
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': broadcast_id,
+ 'formats': formats,
+ 'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'],
+ 'thumbnail': stream_info.get('thumbnail_url'),
+ 'is_live': is_live,
+ }
+
+ def _extract_event(self, event_data):
+ event_id = compat_str(event_data['id'])
+ account_id = compat_str(event_data['owner_account_id'])
+ feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'
+
+ stream_info = event_data.get('stream_info')
+ if stream_info:
+ return self._extract_stream_info(stream_info)
+
+ last_video = None
+ entries = []
+ for i in itertools.count(1):
+ if last_video is None:
+ info_url = feed_root_url
+ else:
+ info_url = '{root}?&id={id}&newer=-1&type=video'.format(
+ root=feed_root_url, id=last_video)
+ videos_info = self._download_json(
+ info_url, event_id, 'Downloading page {0}'.format(i))['data']
+ videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
+ if not videos_info:
+ break
+ for v in videos_info:
+ entries.append(self.url_result(
+ 'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v['id']),
+ 'Livestream', v['id'], v['caption']))
+ last_video = videos_info[-1]['id']
+ return self.playlist_result(entries, event_id, event_data['full_name'])
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- event_name = mobj.group('event_name')
- webpage = self._download_webpage(url, video_id or event_name)
-
- og_video = self._og_search_video_url(
- webpage, 'player url', fatal=False, default=None)
- if og_video is not None:
- query_str = compat_urllib_parse_urlparse(og_video).query
- query = compat_urlparse.parse_qs(query_str)
- if 'play_url' in query:
- api_url = query['play_url'][0].replace('.smil', '')
- info = json.loads(self._download_webpage(
- api_url, video_id, 'Downloading video info'))
- return self._extract_video_info(info)
-
- config_json = self._search_regex(
- r'window.config = ({.*?});', webpage, 'window config')
- info = json.loads(config_json)['event']
-
- def is_relevant(vdata, vid):
- result = vdata['type'] == 'video'
- if video_id is not None:
- result = result and compat_str(vdata['data']['id']) == vid
- return result
-
- if video_id is None:
- # This is an event page:
- return self._extract_event(info)
+ event = mobj.group('event_id') or mobj.group('event_name')
+ account = mobj.group('account_id') or mobj.group('account_name')
+ api_url = self._API_URL_TEMPLATE % (account, event)
+ if video_id:
+ video_data = self._download_json(
+ api_url + '/videos/%s' % video_id, video_id)
+ return self._extract_video_info(video_data)
else:
- videos = [self._extract_video_info(video_data['data'])
- for video_data in info['feed']['data']
- if is_relevant(video_data, video_id)]
- if not videos:
- raise ExtractorError('Cannot find video %s' % video_id)
- return videos[0]
+ event_data = self._download_json(api_url, video_id)
+ return self._extract_event(event_data)
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
IE_NAME = 'livestream:original'
_VALID_URL = r'''(?x)https?://original\.livestream\.com/
- (?P<user>[^/]+)/(?P<type>video|folder)
- (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
+ (?P<user>[^/\?#]+)(?:/(?P<type>video|folder)
+ (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)?
'''
_TESTS = [{
'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'ext': 'mp4',
'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
+ 'duration': 771.301,
+ 'view_count': int,
},
}, {
'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
},
'playlist_mincount': 4,
+ }, {
+ # live stream
+ 'url': 'http://original.livestream.com/znsbahamas',
+ 'only_matching': True,
}]
- def _extract_video(self, user, video_id):
- api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
-
+ def _extract_video_info(self, user, video_id):
+ api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id)
info = self._download_xml(api_url, video_id)
- # this url is used on mobile devices
- stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id)
- stream_info = self._download_json(stream_url, video_id)
+
item = info.find('channel').find('item')
- ns = {'media': 'http://search.yahoo.com/mrss'}
- thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
+ title = xpath_text(item, 'title')
+ media_ns = {'media': 'http://search.yahoo.com/mrss'}
+ thumbnail_url = xpath_attr(
+ item, xpath_with_ns('media:thumbnail', media_ns), 'url')
+ duration = float_or_none(xpath_attr(
+ item, xpath_with_ns('media:content', media_ns), 'duration'))
+ ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'}
+ view_count = int_or_none(xpath_text(
+ item, xpath_with_ns('ls:viewsCount', ls_ns)))
return {
'id': video_id,
- 'title': item.find('title').text,
- 'url': stream_info['progressiveUrl'],
+ 'title': title,
'thumbnail': thumbnail_url,
+ 'duration': duration,
+ 'view_count': view_count,
}
+ def _extract_video_formats(self, video_data, video_id, entry_protocol):
+ formats = []
+
+ progressive_url = video_data.get('progressiveUrl')
+ if progressive_url:
+ formats.append({
+ 'url': progressive_url,
+ 'format_id': 'http',
+ })
+
+ m3u8_url = video_data.get('httpUrl')
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False))
+
+ rtsp_url = video_data.get('rtspUrl')
+ if rtsp_url:
+ formats.append({
+ 'url': rtsp_url,
+ 'format_id': 'rtsp',
+ })
+
+ self._sort_formats(formats)
+ return formats
+
def _extract_folder(self, url, folder_id):
webpage = self._download_webpage(url, folder_id)
paths = orderedSet(re.findall(
<a\s+href="(?=https?://livestre\.am/)
)([^"]+)"''', webpage))
- return {
- '_type': 'playlist',
- 'id': folder_id,
- 'entries': [{
- '_type': 'url',
- 'url': compat_urlparse.urljoin(url, p),
- } for p in paths],
- }
+ entries = [{
+ '_type': 'url',
+ 'url': compat_urlparse.urljoin(url, p),
+ } for p in paths]
+
+ return self.playlist_result(entries, folder_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- id = mobj.group('id')
user = mobj.group('user')
url_type = mobj.group('type')
+ content_id = mobj.group('id')
if url_type == 'folder':
- return self._extract_folder(url, id)
+ return self._extract_folder(url, content_id)
else:
- return self._extract_video(user, id)
+ # this url is used on mobile devices
+ stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user
+ info = {}
+ if content_id:
+ stream_url += '?id=%s' % content_id
+ info = self._extract_video_info(user, content_id)
+ else:
+ content_id = user
+ webpage = self._download_webpage(url, content_id)
+ info = {
+ 'title': self._og_search_title(webpage),
+ 'description': self._og_search_description(webpage),
+ 'thumbnail': self._search_regex(r'channelLogo.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None),
+ }
+ video_data = self._download_json(stream_url, content_id)
+ is_live = video_data.get('isLive')
+ entry_protocol = 'm3u8' if is_live else 'm3u8_native'
+ info.update({
+ 'id': content_id,
+ 'title': self._live_title(info['title']) if is_live else info['title'],
+ 'formats': self._extract_video_formats(video_data, content_id, entry_protocol),
+ 'is_live': is_live,
+ })
+ return info
# The server doesn't support HEAD request, the generic extractor can't detect
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .nuevo import NuevoBaseIE
+
+
+class LoveHomePornIE(NuevoBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?lovehomeporn\.com/video/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
+ _TEST = {
+ 'url': 'http://lovehomeporn.com/video/48483/stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick#menu',
+ 'info_dict': {
+ 'id': '48483',
+ 'display_id': 'stunning-busty-brunette-girlfriend-sucking-and-riding-a-big-dick',
+ 'ext': 'mp4',
+ 'title': 'Stunning busty brunette girlfriend sucking and riding a big dick',
+ 'age_limit': 18,
+ 'duration': 238.47,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ info = self._extract_nuevo(
+ 'http://lovehomeporn.com/media/nuevo/config.php?key=%s' % video_id,
+ video_id)
+ info.update({
+ 'display_id': display_id,
+ 'age_limit': 18
+ })
+ return info
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
- determine_ext,
- js_to_json,
+ int_or_none,
parse_duration,
remove_end,
)
'title': 'Septynios Kauno dienos',
'description': 'md5:24d84534c7dc76581e59f5689462411a',
'duration': 1783,
+ 'view_count': int,
+ 'like_count': int,
},
'params': {
- 'skip_download': True, # HLS download
+ 'skip_download': True, # m3u8 download
},
}
webpage = self._download_webpage(url, video_id)
title = remove_end(self._og_search_title(webpage), ' - LRT')
+ m3u8_url = self._search_regex(
+ r'file\s*:\s*(["\'])(?P<url>.+?)\1\s*\+\s*location\.hash\.substring\(1\)',
+ webpage, 'm3u8 url', group='url')
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
+
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
duration = parse_duration(self._search_regex(
- r"'duration':\s*'([^']+)',", webpage,
- 'duration', fatal=False, default=None))
+ r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1',
+ webpage, 'duration', default=None, group='duration'))
- formats = []
- for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
- data = self._parse_json(js, video_id, transform_source=js_to_json)
- if 'provider' not in data:
- continue
- if data['provider'] == 'rtmp':
- formats.append({
- 'format_id': 'rtmp',
- 'ext': determine_ext(data['file']),
- 'url': data['streamer'],
- 'play_path': 'mp4:%s' % data['file'],
- 'preference': -1,
- 'rtmp_real_time': True,
- })
- else:
- formats.extend(
- self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
+ view_count = int_or_none(self._html_search_regex(
+ r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>',
+ webpage, 'view count', fatal=False, group='count'))
+ like_count = int_or_none(self._search_regex(
+ r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<',
+ webpage, 'like count', fatal=False, group='count'))
return {
'id': video_id,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
+ 'view_count': view_count,
+ 'like_count': like_count,
}
import re
from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ remove_end,
+)
class MailRuIE(InfoExtractor):
'id': '46843144_1263',
'ext': 'mp4',
'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
- 'timestamp': 1397217632,
- 'upload_date': '20140411',
- 'uploader': 'hitech',
+ 'timestamp': 1397039888,
+ 'upload_date': '20140409',
+ 'uploader': 'hitech@corp.mail.ru',
'uploader_id': 'hitech@corp.mail.ru',
'duration': 245,
},
'skip': 'Not accessible from Travis CI server',
},
+ {
+ # only available via metaUrl API
+ 'url': 'http://my.mail.ru/mail/720pizle/video/_myvideo/502.html',
+ 'md5': '3b26d2491c6949d031a32b96bd97c096',
+ 'info_dict': {
+ 'id': '56664382_502',
+ 'ext': 'mp4',
+ 'title': ':8336',
+ 'timestamp': 1449094163,
+ 'upload_date': '20151202',
+ 'uploader': '720pizle@mail.ru',
+ 'uploader_id': '720pizle@mail.ru',
+ 'duration': 6001,
+ },
+ 'skip': 'Not accessible from Travis CI server',
+ }
]
def _real_extract(self, url):
if not video_id:
video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')
- video_data = self._download_json(
- 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')
+ webpage = self._download_webpage(url, video_id)
- author = video_data['author']
- uploader = author['name']
- uploader_id = author.get('id') or author.get('email')
- view_count = video_data.get('views_count')
+ video_data = None
- meta_data = video_data['meta']
- content_id = '%s_%s' % (
- meta_data.get('accId', ''), meta_data['itemId'])
- title = meta_data['title']
- if title.endswith('.mp4'):
- title = title[:-4]
- thumbnail = meta_data['poster']
- duration = meta_data['duration']
- timestamp = meta_data['timestamp']
-
- formats = [
- {
- 'url': video['url'],
- 'format_id': video['key'],
- 'height': int(video['key'].rstrip('p'))
- } for video in video_data['videos']
- ]
+ page_config = self._parse_json(self._search_regex(
+ r'(?s)<script[^>]+class="sp-video__page-config"[^>]*>(.+?)</script>',
+ webpage, 'page config', default='{}'), video_id, fatal=False)
+ if page_config:
+ meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl')
+ if meta_url:
+ video_data = self._download_json(
+ meta_url, video_id, 'Downloading video meta JSON', fatal=False)
+
+ # Fallback old approach
+ if not video_data:
+ video_data = self._download_json(
+ 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id,
+ video_id, 'Downloading video JSON')
+
+ formats = []
+ for f in video_data['videos']:
+ video_url = f.get('url')
+ if not video_url:
+ continue
+ format_id = f.get('key')
+ height = int_or_none(self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None)) if format_id else None
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ 'height': height,
+ })
self._sort_formats(formats)
+ meta_data = video_data['meta']
+ title = remove_end(meta_data['title'], '.mp4')
+
+ author = video_data.get('author')
+ uploader = author.get('name')
+ uploader_id = author.get('id') or author.get('email')
+ view_count = int_or_none(video_data.get('viewsCount') or video_data.get('views_count'))
+
+ acc_id = meta_data.get('accId')
+ item_id = meta_data.get('itemId')
+ content_id = '%s_%s' % (acc_id, item_id) if acc_id and item_id else video_id
+
+ thumbnail = meta_data.get('poster')
+ duration = int_or_none(meta_data.get('duration'))
+ timestamp = int_or_none(meta_data.get('timestamp'))
+
return {
'id': content_id,
'title': title,
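# The lookup order above — embedded page config first, legacy JSON API as a
# fallback — can be sketched generically; fetch_json is a placeholder for
# _download_json, and the page_config shape follows the regex-extracted JSON:
def load_video_data(fetch_json, page_config, video_id):
    meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl')
    video_data = fetch_json(meta_url) if meta_url else None
    if not video_data:  # fall back to the old API endpoint
        video_data = fetch_json('http://api.video.mail.ru/videos/%s.json?new=1' % video_id)
    return video_data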
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class MakerTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:(?:www\.)?maker\.tv/(?:[^/]+/)*video|makerplayer\.com/embed/maker)/(?P<id>[a-zA-Z0-9]{12})'
+ _TEST = {
+ 'url': 'http://www.maker.tv/video/Fh3QgymL9gsc',
+ 'md5': 'ca237a53a8eb20b6dc5bd60564d4ab3e',
+ 'info_dict': {
+ 'id': 'Fh3QgymL9gsc',
+ 'ext': 'mp4',
+ 'title': 'Maze Runner: The Scorch Trials Official Movie Review',
+ 'description': 'md5:11ff3362d7ef1d679fdb649f6413975a',
+ 'upload_date': '20150918',
+ 'timestamp': 1442549540,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ jwplatform_id = self._search_regex(r'jw_?id="([^"]+)"', webpage, 'jwplatform id')
+
+ return {
+ '_type': 'url_transparent',
+ 'id': video_id,
+ 'url': 'jwplatform:%s' % jwplatform_id,
+ 'ie_key': 'JWPlatform',
+ }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import random
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+ sanitized_Request,
+ xpath_text,
+)
+
+
+class MatchTVIE(InfoExtractor):
+ _VALID_URL = r'https?://matchtv\.ru/?#live-player'
+ _TEST = {
+ 'url': 'http://matchtv.ru/#live-player',
+ 'info_dict': {
+ 'id': 'matchtv-live',
+ 'ext': 'flv',
+ 'title': 're:^Матч ТВ - Прямой эфир \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = 'matchtv-live'
+ request = sanitized_Request(
+ 'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse.urlencode({
+ 'ts': '',
+ 'quality': 'SD',
+ 'contentId': '561d2c0df7159b37178b4567',
+ 'sign': '',
+ 'includeHighlights': '0',
+ 'userId': '',
+ 'sessionId': random.randint(1, 1000000000),
+ 'contentType': 'channel',
+ 'timeShift': '0',
+ 'platform': 'portal',
+ }),
+ headers={
+ 'Referer': 'http://player.matchtv.ntvplus.tv/embed-player/NTVEmbedPlayer.swf',
+ })
+ video_url = self._download_json(request, video_id)['data']['videoUrl']
+ f4m_url = xpath_text(self._download_xml(video_url, video_id), './to')
+ formats = self._extract_f4m_formats(f4m_url, video_id)
+ return {
+ 'id': video_id,
+ 'title': self._live_title('Матч ТВ - Прямой эфир'),
+ 'is_live': True,
+ 'formats': formats,
+ }
_VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
_TESTS = [{
- # MDR regularily deletes its videos
+ # MDR regularly deletes its videos
'url': 'http://www.mdr.de/fakt/video189002.html',
'only_matching': True,
}, {
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
video_url = None
- mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+ mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse_unquote(mobj.group(1))
- video_ext = mediaURL[-3:]
+ video_ext = determine_ext(mediaURL)
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
age_limit = (
18
- if re.search(r'"contentRating":"restricted"', webpage)
+ if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage)
else 0)
if isinstance(video_url, list):
preview_url = self._search_regex(
r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
- song_url = preview_url.replace('/previews/', '/c/originals/')
+ song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url)
+ song_url = song_url.replace('/previews/', '/c/originals/')
if not self._check_url(song_url, track_id, 'mp3'):
song_url = song_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
if not self._check_url(song_url, track_id, 'm4a'):
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
age_limit = self._rta_search(webpage)
+++ /dev/null
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class MovShareIE(NovaMovIE):
- IE_NAME = 'movshare'
- IE_DESC = 'MovShare'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'movshare\.(?:net|sx|ag)'}
-
- _HOST = 'www.movshare.net'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>'
- _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.movshare.net/video/559e28be54d96',
- 'md5': 'abd31a2132947262c50429e1d16c1bfd',
- 'info_dict': {
- 'id': '559e28be54d96',
- 'ext': 'flv',
- 'title': 'dissapeared image',
- 'description': 'optical illusion dissapeared image magic illusion',
- }
- }
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
+ float_or_none,
HEADRequest,
sanitized_Request,
unescapeHTML,
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
- mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
+ content_el = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content')))
+ mediagen_url = content_el.attrib['url']
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
if 'acceptMethods' not in mediagen_url:
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
+ 'duration': float_or_none(content_el.attrib.get('duration')),
}
+ def _get_feed_query(self, uri):
+ data = {'uri': uri}
+ if self._LANG:
+ data['lang'] = self._LANG
+ return compat_urllib_parse.urlencode(data)
+
def _get_videos_info(self, uri):
video_id = self._id_from_uri(uri)
feed_url = self._get_feed_url(uri)
- data = compat_urllib_parse.urlencode({'uri': uri})
- info_url = feed_url + '?'
- if self._LANG:
- info_url += 'lang=%s&' % self._LANG
- info_url += data
+ info_url = feed_url + '?' + self._get_feed_query(uri)
return self._get_videos_info_from_url(info_url, video_id)
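# Template-method hook: the base class assembles the feed URL while subclasses
# override only _get_feed_query (as NickIE and NextMovieIE do further below in
# this patch). A stripped-down illustration with hypothetical names:
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

class FeedBase(object):
    _FEED_URL = 'http://feeds.example.com/dispatch.htm'

    def _get_feed_query(self, uri):
        return urlencode({'uri': uri})

    def feed_url_for(self, uri):
        return self._FEED_URL + '?' + self._get_feed_query(uri)

class NickLikeFeed(FeedBase):
    def _get_feed_query(self, uri):
        return urlencode({'feed': 'nick_arc_player_prime', 'mgid': uri})

print(NickLikeFeed().feed_url_for('mgid:example:123'))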
def _get_videos_info_from_url(self, url, video_id):
return self.playlist_result(
[self._get_video_info(item) for item in idoc.findall('.//item')])
- def _real_extract(self, url):
- title = url_basename(url)
- webpage = self._download_webpage(url, title)
+ def _extract_mgid(self, webpage):
try:
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
# or http://media.mtvnservices.com/{mgid}
'sm4:video:embed', webpage, 'sm4 embed', default='')
mgid = self._search_regex(
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')
+ return mgid
+ def _real_extract(self, url):
+ title = url_basename(url)
+ webpage = self._download_webpage(url, title)
+ mgid = self._extract_mgid(webpage)
videos_info = self._get_videos_info(mgid)
return videos_info
'info_dict': {
'id': '11741',
'ext': 'mp4',
- "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
- "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
+ 'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
+ 'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
},
}
class MyVideoIE(InfoExtractor):
+ _WORKING = False
_VALID_URL = r'http://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
IE_NAME = 'myvideo'
_TEST = {
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
- remove_end,
parse_duration,
+ int_or_none,
+ xpath_text,
+ xpath_attr,
)
class NBAIE(InfoExtractor):
- _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
+ _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)?video/(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
_TESTS = [{
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
- 'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
+ 'md5': '9e7729d3010a9c71506fd1248f74e4f4',
'info_dict': {
- 'id': '0021200253-okc-bkn-recap.nba',
+ 'id': '0021200253-okc-bkn-recap',
'ext': 'mp4',
'title': 'Thunder vs. Nets',
'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
'duration': 181,
+ 'timestamp': 1354638466,
+ 'upload_date': '20121204',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
},
}, {
'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
'only_matching': True,
}, {
- 'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
+ 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
+ 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4',
'info_dict': {
- 'id': '0041400301-cle-atl-recap.nba',
+ 'id': '0041400301-cle-atl-recap',
'ext': 'mp4',
- 'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1',
+ 'title': 'Hawks vs. Cavaliers Game 1',
'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d',
'duration': 228,
- },
- 'params': {
- 'skip_download': True,
+ 'timestamp': 1432134543,
+ 'upload_date': '20150520',
}
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
+ path, video_id = re.match(self._VALID_URL, url).groups()
+ if path.startswith('nba/'):
+ path = path[3:]
+ video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id)
+ video_id = xpath_text(video_info, 'slug')
+ title = xpath_text(video_info, 'headline')
+ description = xpath_text(video_info, 'description')
+ duration = parse_duration(xpath_text(video_info, 'length'))
+ timestamp = int_or_none(xpath_attr(video_info, 'dateCreated', 'uts'))
- shortened_video_id = video_id.rpartition('/')[2]
- title = remove_end(
- self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
+ thumbnails = []
+ for image in video_info.find('images'):
+ thumbnails.append({
+ 'id': image.attrib.get('cut'),
+ 'url': image.text,
+ 'width': int_or_none(image.attrib.get('width')),
+ 'height': int_or_none(image.attrib.get('height')),
+ })
- description = self._og_search_description(webpage)
- duration_str = self._html_search_meta(
- 'duration', webpage, 'duration', default=None)
- if not duration_str:
- duration_str = self._html_search_regex(
- r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False)
- duration = parse_duration(duration_str)
+ formats = []
+ for video_file in video_info.findall('.//file'):
+ video_url = video_file.text
+ if video_url.startswith('/'):
+ continue
+ if video_url.endswith('.m3u8'):
+ formats.extend(self._extract_m3u8_formats(video_url, video_id, ext='mp4', m3u8_id='hls', fatal=False))
+ elif video_url.endswith('.f4m'):
+ formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.1.1', video_id, f4m_id='hds', fatal=False))
+ else:
+ key = video_file.attrib.get('bitrate')
+ format_info = {
+ 'format_id': key,
+ 'url': video_url,
+ }
+ mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key)
+ if mobj:
+ format_info.update({
+ 'width': int(mobj.group(1)),
+ 'height': int(mobj.group(2)),
+ 'tbr': int_or_none(mobj.group(3)),
+ })
+ formats.append(format_info)
+ self._sort_formats(formats)
return {
- 'id': shortened_video_id,
- 'url': video_url,
+ 'id': video_id,
'title': title,
'description': description,
'duration': duration,
+ 'timestamp': timestamp,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
}
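import re

# The non-HLS branch above derives dimensions and bitrate from bitrate keys
# shaped like '<width>x<height>_<tbr>'; the sample value is hypothetical:
mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', '640x360_664')
print(mobj.groups())  # ('640', '360', '664')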
import re
from .common import InfoExtractor
-from ..compat import (
- compat_str,
- compat_HTTPError,
-)
+from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
find_xpath_attr,
lowercase_escape,
+ smuggle_url,
unescapeHTML,
)
_TESTS = [
{
'url': 'http://www.nbc.com/the-tonight-show/segments/112966',
- # md5 checksum is not stable
'info_dict': {
- 'id': 'c9xnCo0YPOPH',
- 'ext': 'flv',
+ 'id': '112966',
+ 'ext': 'mp4',
'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.',
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
},
{
'url': 'http://www.nbc.com/the-tonight-show/episodes/176',
'info_dict': {
- 'id': 'XwU9KZkp98TH',
+ 'id': '176',
'ext': 'flv',
'title': 'Ricky Gervais, Steven Van Zandt, ILoveMakonnen',
'description': 'A brand new episode of The Tonight Show welcomes Ricky Gervais, Steven Van Zandt and ILoveMakonnen.',
},
- 'skip': 'Only works from US',
+ 'skip': '404 Not Found',
},
{
'url': 'http://www.nbc.com/saturday-night-live/video/star-wars-teaser/2832821',
'info_dict': {
- 'id': '8iUuyzWDdYUZ',
- 'ext': 'flv',
+ 'id': '2832821',
+ 'ext': 'mp4',
'title': 'Star Wars Teaser',
'description': 'md5:0b40f9cbde5b671a7ff62fceccc4f442',
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
'skip': 'Only works from US',
},
{
# This video has expired but with an escaped embedURL
'url': 'http://www.nbc.com/parenthood/episode-guide/season-5/just-like-at-home/515',
- 'skip': 'Expired'
+ 'only_matching': True,
}
]
theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex(
[
r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
+ r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"',
r'"embedURL"\s*:\s*"([^"]+)"'
],
webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/')))
if theplatform_url.startswith('//'):
theplatform_url = 'http:' + theplatform_url
- return self.url_result(theplatform_url)
+ return {
+ '_type': 'url_transparent',
+ 'url': smuggle_url(theplatform_url, {'source_url': url}),
+ 'id': video_id,
+ }
class NBCSportsVPlayerIE(InfoExtractor):
class NBCSportsIE(InfoExtractor):
- # Does not include https becuase its certificate is invalid
+ # Does not include https because its certificate is invalid
_VALID_URL = r'http://www\.nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
_TEST = {
'title': info.find('headline').text,
'ext': 'flv',
'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
- 'description': compat_str(info.find('caption').text),
+ 'description': info.find('caption').text,
'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
}
else:
'embedURL', webpage, 'embed URL', fatal=True)
description = self._search_regex(
r'<p[^>]+itemprop="description">([^<]+)</p>',
- webpage, 'description', fatal=False)
+ webpage, 'description', default=None) or self._og_search_description(webpage)
timestamp = parse_iso8601(
self._search_regex(
- r'<span itemprop="datePublished" content="([^"]+)">',
+ r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"',
webpage, 'upload date', fatal=False))
return {
'_type': 'url_transparent',
src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds'))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
- src, video_id, m3u8_id='hls', entry_protocol='m3u8_native'))
+ src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native'))
else:
quality = f.get('quality')
ff = {
}
def _real_extract(self, url):
- feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
+ feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed')
entries = [{
'_type': 'url',
'title': feed_entry['title'],
'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
- 'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
+ 'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
} for feed_entry in feed]
return {
compat_str,
compat_itertools_count,
)
-from ..utils import sanitized_Request
+from ..utils import (
+ sanitized_Request,
+ float_or_none,
+)
class NetEaseMusicBaseIE(InfoExtractor):
result = b64encode(m.digest()).decode('ascii')
return result.replace('/', '_').replace('+', '-')
- @classmethod
- def extract_formats(cls, info):
+ def extract_formats(self, info):
formats = []
- for song_format in cls._FORMATS:
+ for song_format in self._FORMATS:
details = info.get(song_format)
if not details:
continue
- formats.append({
- 'url': 'http://m5.music.126.net/%s/%s.%s' %
- (cls._encrypt(details['dfsId']), details['dfsId'],
- details['extension']),
- 'ext': details.get('extension'),
- 'abr': details.get('bitrate', 0) / 1000,
- 'format_id': song_format,
- 'filesize': details.get('size'),
- 'asr': details.get('sr')
- })
+ song_file_path = '/%s/%s.%s' % (
+ self._encrypt(details['dfsId']), details['dfsId'], details['extension'])
+
+ # 203.130.59.9, 124.40.233.182, 115.231.74.139, etc. are reverse proxy-like
+ # endpoints from NetEase's CDN provider that can be used if m5.music.126.net
+ # does not work, especially for users outside of Mainland China
+ # via: https://github.com/JixunMoe/unblock-163/issues/3#issuecomment-163115880
+ for host in ('http://m5.music.126.net', 'http://115.231.74.139/m1.music.126.net',
+ 'http://124.40.233.182/m1.music.126.net', 'http://203.130.59.9/m1.music.126.net'):
+ song_url = host + song_file_path
+ if self._is_valid_url(song_url, info['id'], 'song'):
+ formats.append({
+ 'url': song_url,
+ 'ext': details.get('extension'),
+ 'abr': float_or_none(details.get('bitrate'), scale=1000),
+ 'format_id': song_format,
+ 'filesize': details.get('size'),
+ 'asr': details.get('sr')
+ })
+ break
return formats
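# Mirror-fallback sketch for the loop above: try each CDN host in order and
# keep the first URL that validates. check_url stands in for _is_valid_url
# and is an assumption of this sketch, not part of NetEase's API:
def first_working_url(song_file_path, check_url):
    for host in ('http://m5.music.126.net',
                 'http://115.231.74.139/m1.music.126.net',
                 'http://124.40.233.182/m1.music.126.net',
                 'http://203.130.59.9/m1.music.126.net'):
        song_url = host + song_file_path
        if check_url(song_url):
            return song_url
    return None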
@classmethod
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mtv import MTVServicesInfoExtractor
+from ..compat import compat_urllib_parse
+
+
+class NextMovieIE(MTVServicesInfoExtractor):
+ IE_NAME = 'nextmovie.com'
+ _VALID_URL = r'https?://(?:www\.)?nextmovie\.com/shows/[^/]+/\d{4}-\d{2}-\d{2}/(?P<id>[^/?#]+)'
+ _FEED_URL = 'http://lite.dextr.mtvi.com/service1/dispatch.htm'
+ _TESTS = [{
+ 'url': 'http://www.nextmovie.com/shows/exclusives/2013-03-10/mgid:uma:videolist:nextmovie.com:1715019/',
+ 'md5': '09a9199f2f11f10107d04fcb153218aa',
+ 'info_dict': {
+ 'id': '961726',
+ 'ext': 'mp4',
+ 'title': 'The Muppets\' Gravity',
+ },
+ }]
+
+ def _get_feed_query(self, uri):
+ return compat_urllib_parse.urlencode({
+ 'feed': '1505',
+ 'mgid': uri,
+ })
+
+ def _real_extract(self, url):
+ mgid = self._match_id(url)
+ return self._get_videos_info(mgid)
response = self._download_webpage(request_url, playlist_title)
response = self._fix_json(response)
if not response.strip():
- self._downloader.report_warning('Got an empty reponse, trying '
+ self._downloader.report_warning('Got an empty response, trying '
'adding the "newvideos" parameter')
response = self._download_webpage(request_url + '&newvideos=true',
playlist_title)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mtv import MTVServicesInfoExtractor
+from ..compat import compat_urllib_parse
+
+
+class NickIE(MTVServicesInfoExtractor):
+ IE_NAME = 'nick.com'
+ _VALID_URL = r'https?://(?:www\.)?nick\.com/videos/clip/(?P<id>[^/?#.]+)'
+ _FEED_URL = 'http://udat.mtvnservices.com/service1/dispatch.htm'
+ _TESTS = [{
+ 'url': 'http://www.nick.com/videos/clip/alvinnn-and-the-chipmunks-112-full-episode.html',
+ 'playlist': [
+ {
+ 'md5': '6e5adc1e28253bbb1b28ab05403dd4d4',
+ 'info_dict': {
+ 'id': 'be6a17b0-412d-11e5-8ff7-0026b9414f30',
+ 'ext': 'mp4',
+ 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S1',
+ 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',
+ }
+ },
+ {
+ 'md5': 'd7be441fc53a1d4882fa9508a1e5b3ce',
+ 'info_dict': {
+ 'id': 'be6b8f96-412d-11e5-8ff7-0026b9414f30',
+ 'ext': 'mp4',
+ 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S2',
+ 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',
+ }
+ },
+ {
+ 'md5': 'efffe1728a234b2b0d2f2b343dd1946f',
+ 'info_dict': {
+ 'id': 'be6cf7e6-412d-11e5-8ff7-0026b9414f30',
+ 'ext': 'mp4',
+ 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S3',
+ 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',
+ }
+ },
+ {
+ 'md5': '1ec6690733ab9f41709e274a1d5c7556',
+ 'info_dict': {
+ 'id': 'be6e3354-412d-11e5-8ff7-0026b9414f30',
+ 'ext': 'mp4',
+ 'title': 'ALVINNN!!! and The Chipmunks: "Mojo Missing/Who\'s The Animal" S4',
+ 'description': 'Alvin is convinced his mojo was in a cap he gave to a fan, and must find a way to get his hat back before the Chipmunks’ big concert.\nDuring a costume visit to the zoo, Alvin finds himself mistaken for the real Tasmanian devil.',
+ }
+ },
+ ],
+ }]
+
+ def _get_feed_query(self, uri):
+ return compat_urllib_parse.urlencode({
+ 'feed': 'nick_arc_player_prime',
+ 'mgid': uri,
+ })
+
+ def _extract_mgid(self, webpage):
+ return self._search_regex(r'data-contenturi="([^"]+)', webpage, 'mgid')
from ..compat import (
compat_str,
compat_urllib_parse,
+ compat_urlparse,
)
from ..utils import (
clean_html,
if 'erreur' in login:
raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)
+ @staticmethod
+ def _ts():
+ return int(time.time() * 1000)
+
def _call_api(self, path, video_id, note, sub_lang=None):
- ts = compat_str(int(time.time() * 1000))
+ ts = compat_str(self._ts() + self._ts_offset)
tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
url = self._API_URL_TEMPLATE % (path, ts, tk)
if sub_lang:
url += self._SUB_LANG_TEMPLATE % sub_lang
- resp = self._download_json(url, video_id, note)
+ request = sanitized_Request(url)
+ request.add_header('Referer', self._referer)
+
+ resp = self._download_json(request, video_id, note)
if isinstance(resp, dict) and resp.get('error'):
self._raise_error(resp['error'], resp['description'])
expected=True)
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
+
+ # Timestamp adjustment offset between server time and local time
+ # must be calculated in order to use timestamps closest to server's
+ # in all API requests (see https://github.com/rg3/youtube-dl/issues/7864)
+ webpage = self._download_webpage(url, video_id)
+
+ player_url = self._search_regex(
+ r'(["\'])(?P<player>https?://noco\.tv/(?:[^/]+/)+NocoPlayer.+?\.swf.*?)\1',
+ webpage, 'noco player', group='player',
+ default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf')
+
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+ ts = int_or_none(qs.get('ts', [None])[0])
+ self._ts_offset = ts - self._ts() if ts else 0
+ self._referer = player_url
medias = self._call_api(
'shows/%s/medias' % video_id,
'format_id': format_id_extended,
'width': int_or_none(fmt.get('res_width')),
'height': int_or_none(fmt.get('res_lines')),
- 'abr': int_or_none(fmt.get('audiobitrate')),
- 'vbr': int_or_none(fmt.get('videobitrate')),
+ 'abr': int_or_none(fmt.get('audiobitrate'), 1000),
+ 'vbr': int_or_none(fmt.get('videobitrate'), 1000),
'filesize': int_or_none(fmt.get('filesize')),
'format_note': qualities[format_id].get('quality_name'),
'quality': qualities[format_id].get('priority'),
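import hashlib
import time

# Sketch of the ts/tk scheme above: request timestamps are shifted by the
# offset between the server clock (read from the player URL's 'ts' parameter)
# and the local clock, and the token is a double MD5 over the timestamp plus
# the static salt taken from _call_api above:
def api_token(ts_offset=0):
    ts = str(int(time.time() * 1000) + ts_offset)
    tk = hashlib.md5(
        (hashlib.md5(ts.encode('ascii')).hexdigest() +
         '#8S?uCraTedap6a').encode('ascii')).hexdigest()
    return ts, tk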
IE_NAME = 'novamov'
IE_DESC = 'NovaMov'
- _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
+ _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video|mobile/#/videos)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
_VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'}
_HOST = 'www.novamov.com'
_FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>'
- _FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";'
+ _FILEKEY_REGEX = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);'
_TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>'
_DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>'
+ _URL_TEMPLATE = 'http://%s/video/%s'
_TEST = {
'url': 'http://www.novamov.com/video/4rurhn9x446jj',
'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
}
+ def _check_existence(self, webpage, video_id):
+ if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
+ raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
def _real_extract(self, url):
video_id = self._match_id(url)
- url = 'http://%s/video/%s' % (self._HOST, video_id)
+ url = self._URL_TEMPLATE % (self._HOST, video_id)
webpage = self._download_webpage(
url, video_id, 'Downloading video page')
- if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
- raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+ self._check_existence(webpage, video_id)
def extract_filekey(default=NO_DEFAULT):
- return self._search_regex(
+ filekey = self._search_regex(
self._FILEKEY_REGEX, webpage, 'filekey', default=default)
+ if filekey is not default and (filekey[0] != '"' or filekey[-1] != '"'):
+ return self._search_regex(
+ r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage, 'filekey', default=default)
+ else:
+ return filekey
filekey = extract_filekey(default=None)
request.add_header('Referer', post_url)
webpage = self._download_webpage(
request, video_id, 'Downloading continue to the video page')
+ self._check_existence(webpage, video_id)
filekey = extract_filekey()
'title': title,
'description': description
}
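import re

# extract_filekey above handles two page variants: an inline literal
# ('flashvars.filekey="abc";') and an indirection through a JS variable
# ('flashvars.filekey=fkzd;'), which needs a second lookup. A self-contained
# sketch over a made-up page snippet:
def resolve_filekey(webpage):
    raw = re.search(r'flashvars\.filekey=(?P<filekey>"?[^";]+"?);', webpage).group('filekey')
    if raw.startswith('"') and raw.endswith('"'):
        return raw.strip('"')
    return re.search(r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(raw), webpage).group(1)

print(resolve_filekey('var fkzd="180.123.1.1-abcdef";flashvars.filekey=fkzd;'))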
+
+
+class WholeCloudIE(NovaMovIE):
+ IE_NAME = 'wholecloud'
+ IE_DESC = 'WholeCloud'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': '(?:wholecloud\.net|movshare\.(?:net|sx|ag))'}
+
+ _HOST = 'www.wholecloud.net'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>'
+ _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>'
+
+ _TEST = {
+ 'url': 'http://www.wholecloud.net/video/559e28be54d96',
+ 'md5': 'abd31a2132947262c50429e1d16c1bfd',
+ 'info_dict': {
+ 'id': '559e28be54d96',
+ 'ext': 'flv',
+ 'title': 'dissapeared image',
+ 'description': 'optical illusion dissapeared image magic illusion',
+ }
+ }
+
+
+class NowVideoIE(NovaMovIE):
+ IE_NAME = 'nowvideo'
+ IE_DESC = 'NowVideo'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'}
+
+ _HOST = 'www.nowvideo.to'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<h4>([^<]+)</h4>'
+ _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
+
+ _TEST = {
+ 'url': 'http://www.nowvideo.sx/video/f1d6fce9a968b',
+ 'md5': '12c82cad4f2084881d8bc60ee29df092',
+ 'info_dict': {
+ 'id': 'f1d6fce9a968b',
+ 'ext': 'flv',
+ 'title': 'youtubedl test video BaWjenozKc',
+ 'description': 'Description',
+ },
+ }
+
+
+class VideoWeedIE(NovaMovIE):
+ IE_NAME = 'videoweed'
+ IE_DESC = 'VideoWeed'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'}
+
+ _HOST = 'www.videoweed.es'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
+ _URL_TEMPLATE = 'http://%s/file/%s'
+
+ _TEST = {
+ 'url': 'http://www.videoweed.es/file/b42178afbea14',
+ 'md5': 'abd31a2132947262c50429e1d16c1bfd',
+ 'info_dict': {
+ 'id': 'b42178afbea14',
+ 'ext': 'flv',
+ 'title': 'optical illusion dissapeared image magic illusion',
+ 'description': ''
+ },
+ }
+
+
+class CloudTimeIE(NovaMovIE):
+ IE_NAME = 'cloudtime'
+ IE_DESC = 'CloudTime'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'cloudtime\.to'}
+
+ _HOST = 'www.cloudtime.to'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>'
+
+ _TEST = None
# encoding: utf-8
from __future__ import unicode_literals
-from .brightcove import BrightcoveLegacyIE
+from .brightcove import (
+ BrightcoveLegacyIE,
+ BrightcoveNewIE,
+)
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
note='Downloading player JavaScript',
errnote='Unable to download player JavaScript')
bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code)
- if bc_url is None:
- raise ExtractorError('Could not find player definition')
- return self.url_result(bc_url, 'BrightcoveLegacy')
+ if bc_url:
+ return self.url_result(bc_url, BrightcoveLegacyIE.ie_key())
+ bc_url = BrightcoveNewIE._extract_url(player_code)
+ if bc_url:
+ return self.url_result(bc_url, BrightcoveNewIE.ie_key())
+ raise ExtractorError('Could not find player definition')
elif source == 'vimeo':
return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
elif source == 'youtube':
class NowTVIE(NowTVBaseIE):
- _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/(?:list/[^/]+/)?(?P<id>[^/]+)/(?:player|preview)'
+ _WORKING = False
+ _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/(?:(?:list/[^/]+|jahr/\d{4}/\d{1,2})/)?(?P<id>[^/]+)/(?:player|preview)'
_TESTS = [{
# rtl
}, {
'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player',
'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.de/rtl2/zuhause-im-glueck/jahr/2015/11/eine-erschuetternde-diagnose/player',
+ 'only_matching': True,
}]
def _real_extract(self, url):
+++ /dev/null
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class NowVideoIE(NovaMovIE):
- IE_NAME = 'nowvideo'
- IE_DESC = 'NowVideo'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'}
-
- _HOST = 'www.nowvideo.to'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _FILEKEY_REGEX = r'var fkzd="([^"]+)";'
- _TITLE_REGEX = r'<h4>([^<]+)</h4>'
- _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
- 'md5': 'f8fbbc8add72bd95b7850c6a02fc8817',
- 'info_dict': {
- 'id': '0mw0yow7b6dxa',
- 'ext': 'flv',
- 'title': 'youtubedl test video _BaW_jenozKc.mp4',
- 'description': 'Description',
- }
- }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote
+from ..utils import (
+ int_or_none,
+ xpath_text,
+)
+
+
+class NozIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?noz\.de/video/(?P<id>[0-9]+)/'
+ _TESTS = [{
+ 'url': 'http://www.noz.de/video/25151/32-Deutschland-gewinnt-Badminton-Lnderspiel-in-Melle',
+ 'info_dict': {
+ 'id': '25151',
+ 'ext': 'mp4',
+ 'duration': 215,
+ 'title': '3:2 - Deutschland gewinnt Badminton-Länderspiel in Melle',
+ 'description': 'Vor rund 370 Zuschauern gewinnt die deutsche Badminton-Nationalmannschaft am Donnerstag ein EM-Vorbereitungsspiel gegen Frankreich in Melle. Video Moritz Frankenberg.',
+ 'thumbnail': 're:^http://.*\.jpg',
+ },
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ description = self._og_search_description(webpage)
+
+ edge_url = self._html_search_regex(
+ r'<script\s+(?:type="text/javascript"\s+)?src="(.*?/videojs_.*?)"',
+ webpage, 'edge URL')
+ edge_content = self._download_webpage(edge_url, 'meta configuration')
+
+ config_url_encoded = self._search_regex(
+ r'so\.addVariable\("config_url","[^,]*,(.*?)"',
+ edge_content, 'config URL'
+ )
+ config_url = compat_urllib_parse_unquote(config_url_encoded)
+
+ doc = self._download_xml(config_url, 'video configuration')
+ title = xpath_text(doc, './/title')
+ thumbnail = xpath_text(doc, './/article/thumbnail/url')
+ duration = int_or_none(xpath_text(
+ doc, './/article/movie/file/duration'))
+ formats = []
+ for qnode in doc.findall('.//article/movie/file/qualities/qual'):
+ video_node = qnode.find('./html_urls/video_url[@format="video/mp4"]')
+ if video_node is None:
+ continue # auto
+ formats.append({
+ 'url': video_node.text,
+ 'format_name': xpath_text(qnode, './name'),
+ 'format_id': xpath_text(qnode, './id'),
+ 'height': int_or_none(xpath_text(qnode, './height')),
+ 'width': int_or_none(xpath_text(qnode, './width')),
+ 'tbr': int_or_none(xpath_text(qnode, './bitrate'), scale=1000),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'formats': formats,
+ 'title': title,
+ 'duration': duration,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ }
if not video_url:
continue
if format_id == 'adaptive':
- formats.extend(self._extract_m3u8_formats(video_url, video_id))
+ formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
else:
formats.append({
'url': video_url,
}
+class SchoolTVIE(InfoExtractor):
+ IE_NAME = 'schooltv'
+ _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P<id>[^/?#&]+)'
+
+ _TEST = {
+ 'url': 'http://www.schooltv.nl/video/ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam/',
+ 'info_dict': {
+ 'id': 'WO_NTR_429477',
+ 'display_id': 'ademhaling-de-hele-dag-haal-je-adem-maar-wat-gebeurt-er-dan-eigenlijk-in-je-lichaam',
+ 'title': 'Ademhaling: De hele dag haal je adem. Maar wat gebeurt er dan eigenlijk in je lichaam?',
+ 'ext': 'mp4',
+ 'description': 'md5:abfa0ff690adb73fd0297fd033aaa631'
+ },
+ 'params': {
+ # Skip because of m3u8 download
+ 'skip_download': True
+ }
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ video_id = self._search_regex(
+ r'data-mid=(["\'])(?P<id>.+?)\1', webpage, 'video_id', group='id')
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'NPO',
+ 'url': 'npo:%s' % video_id,
+ 'display_id': display_id
+ }
+
+
class VPROIE(NPOIE):
IE_NAME = 'vpro'
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
--- /dev/null
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+ int_or_none,
+ qualities,
+)
+
+
+class NprIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?npr\.org/player/v2/mediaPlayer\.html\?.*\bid=(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'http://www.npr.org/player/v2/mediaPlayer.html?id=449974205',
+ 'info_dict': {
+ 'id': '449974205',
+ 'title': 'New Music From Beach House, Chairlift, CMJ Discoveries And More'
+ },
+ 'playlist_count': 7,
+ }, {
+ 'url': 'http://www.npr.org/player/v2/mediaPlayer.html?action=1&t=1&islist=false&id=446928052&m=446929930&live=1',
+ 'info_dict': {
+ 'id': '446928052',
+ 'title': "Songs We Love: Tigran Hamasyan, 'Your Mercy is Boundless'"
+ },
+ 'playlist': [{
+ 'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
+ 'info_dict': {
+ 'id': '446929930',
+ 'ext': 'mp3',
+ 'title': 'Your Mercy is Boundless (Bazum en Qo gtutyunqd)',
+ 'duration': 402,
+ },
+ }],
+ }]
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ config = self._download_json(
+ 'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({
+ 'id': playlist_id,
+ 'fields': 'titles,audio,show',
+ 'format': 'json',
+ 'apiKey': 'MDAzMzQ2MjAyMDEyMzk4MTU1MDg3ZmM3MQ010',
+ }), playlist_id)
+
+ story = config['list']['story'][0]
+
+ KNOWN_FORMATS = ('threegp', 'mp4', 'mp3')
+ quality = qualities(KNOWN_FORMATS)
+
+ entries = []
+ for audio in story.get('audio', []):
+ title = audio.get('title', {}).get('$text')
+ duration = int_or_none(audio.get('duration', {}).get('$text'))
+ formats = []
+ for format_id, formats_entry in audio.get('format', {}).items():
+ if not formats_entry:
+ continue
+ if isinstance(formats_entry, list):
+ formats_entry = formats_entry[0]
+ format_url = formats_entry.get('$text')
+ if not format_url:
+ continue
+ if format_id in KNOWN_FORMATS:
+ formats.append({
+ 'url': format_url,
+ 'format_id': format_id,
+ 'ext': formats_entry.get('type'),
+ 'quality': quality(format_id),
+ })
+ self._sort_formats(formats)
+ entries.append({
+ 'id': audio['id'],
+ 'title': title,
+ 'duration': duration,
+ 'formats': formats,
+ })
+
+ playlist_title = story.get('title', {}).get('$text')
+ return self.playlist_result(entries, playlist_id, playlist_title)
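# qualities() from youtube_dl.utils turns an ordered list of format ids into
# a ranking function (unknown ids rank lowest); a minimal reimplementation:
def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

quality = qualities(('threegp', 'mp4', 'mp3'))
print(quality('mp3'), quality('mp4'), quality('flv'))  # 2 1 -1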
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
+ determine_ext,
ExtractorError,
float_or_none,
parse_duration,
'http://v8.psapi.nrk.no/mediaelement/%s' % video_id,
video_id, 'Downloading media JSON')
- if data['usageRights']['isGeoBlocked']:
- raise ExtractorError(
- 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
- expected=True)
+ media_url = data.get('mediaUrl')
- video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81'
+ if not media_url:
+ if data['usageRights']['isGeoBlocked']:
+ raise ExtractorError(
+ 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
+ expected=True)
+
+ if determine_ext(media_url) == 'f4m':
+ formats = self._extract_f4m_formats(
+ media_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id, f4m_id='hds')
+ else:
+ formats = [{
+ 'url': media_url,
+ 'ext': 'flv',
+ }]
duration = parse_duration(data.get('duration'))
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'flv',
'title': data['title'],
'description': data['description'],
'duration': duration,
'thumbnail': thumbnail,
+ 'formats': formats,
}
_TESTS = [
{
'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
- 'md5': 'adf2c5454fa2bf032f47a9f8fb351342',
'info_dict': {
'id': 'MUHH48000314',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': '20 spørsmål',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'upload_date': '20140523',
'duration': 1741.52,
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
},
{
'url': 'https://tv.nrk.no/program/mdfp15000514',
- 'md5': '383650ece2b25ecec996ad7b5bb2a384',
'info_dict': {
'id': 'mdfp15000514',
- 'ext': 'flv',
- 'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
+ 'ext': 'mp4',
+ 'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting',
'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
'upload_date': '20140524',
- 'duration': 4605.0,
+ 'duration': 4605.08,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
},
},
{
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
webpage = self._download_webpage(url, video_id)
info = self._parse_json(self._search_regex(
- r'(?s)ntv.pageInfo.article =\s(\{.*?\});', webpage, 'info'),
+ r'(?s)ntv\.pageInfo\.article\s*=\s*(\{.*?\});', webpage, 'info'),
video_id, transform_source=js_to_json)
timestamp = int_or_none(info.get('publishedDateAsUnixTimeStamp'))
vdata = self._parse_json(self._search_regex(
webpage, 'player data'),
video_id, transform_source=js_to_json)
duration = parse_duration(vdata.get('duration'))
- formats = [{
- 'format_id': 'flash',
- 'url': 'rtmp://fms.n-tv.de/' + vdata['video'],
- }, {
- 'format_id': 'mobile',
- 'url': 'http://video.n-tv.de' + vdata['videoMp4'],
- 'tbr': 400, # estimation
- }]
- m3u8_url = 'http://video.n-tv.de' + vdata['videoM3u8']
- formats.extend(self._extract_m3u8_formats(
- m3u8_url, video_id, ext='mp4',
- entry_protocol='m3u8_native', preference=0))
+
+ formats = []
+ if vdata.get('video'):
+ formats.append({
+ 'format_id': 'flash',
+ 'url': 'rtmp://fms.n-tv.de/%s' % vdata['video'],
+ })
+ if vdata.get('videoMp4'):
+ formats.append({
+ 'format_id': 'mobile',
+ 'url': compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoMp4']),
+ 'tbr': 400, # estimation
+ })
+ if vdata.get('videoM3u8'):
+ m3u8_url = compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoM3u8'])
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native',
+ preference=0, m3u8_id='hls', fatal=False))
self._sort_formats(formats)
return {
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+from ..utils import (
+ float_or_none,
+ xpath_text
+)
+
+
+class NuevoBaseIE(InfoExtractor):
+ def _extract_nuevo(self, config_url, video_id):
+ config = self._download_xml(
+ config_url, video_id, transform_source=lambda s: s.strip())
+
+ title = xpath_text(config, './title', 'title', fatal=True).strip()
+ video_id = xpath_text(config, './mediaid', default=video_id)
+ thumbnail = xpath_text(config, ['./image', './thumb'])
+ duration = float_or_none(xpath_text(config, './duration'))
+
+ formats = []
+ for element_name, format_id in (('file', 'sd'), ('filehd', 'hd')):
+ video_url = xpath_text(config, element_name)
+ if video_url:
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ })
+ self._check_formats(formats, video_id)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats
+ }
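# The shared base above keeps site extractors minimal: a subclass only
# supplies its config URL and any extra fields (see LoveHomePornIE earlier in
# this patch). A hypothetical subclass for an imaginary site:
from .nuevo import NuevoBaseIE

class ExampleNuevoIE(NuevoBaseIE):
    _VALID_URL = r'https?://(?:www\.)?example\.org/video/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_nuevo(
            'http://example.org/media/nuevo/config.php?key=%s' % video_id,
            video_id)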
class OdnoklassnikiIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
+ _VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
}, {
'url': 'http://www.ok.ru/videoembed/20648036891',
'only_matching': True,
+ }, {
+ 'url': 'http://m.ok.ru/video/20079905452',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://mobile.ok.ru/video/20079905452',
+ 'only_matching': True,
}]
def _real_extract(self, url):
from __future__ import unicode_literals
import re
-import json
import base64
from .common import InfoExtractor
from ..utils import (
- unescapeHTML,
- ExtractorError,
- determine_ext,
int_or_none,
+ float_or_none,
+ ExtractorError,
+ unsmuggle_url,
)
+from ..compat import compat_urllib_parse
class OoyalaBaseIE(InfoExtractor):
-
- def _extract_result(self, info, more_info):
- embedCode = info['embedCode']
- video_url = info.get('ipad_url') or info['url']
-
- if determine_ext(video_url) == 'm3u8':
- formats = self._extract_m3u8_formats(video_url, embedCode, ext='mp4')
- else:
- formats = [{
- 'url': video_url,
- 'ext': 'mp4',
- }]
-
- return {
- 'id': embedCode,
- 'title': unescapeHTML(info['title']),
- 'formats': formats,
- 'description': unescapeHTML(more_info['description']),
- 'thumbnail': more_info['promo'],
+ _PLAYER_BASE = 'http://player.ooyala.com/'
+ _CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
+ _AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v1/authorization/embed_code/%s/%s?'
+
+ def _extract(self, content_tree_url, video_id, domain='example.org'):
+ content_tree = self._download_json(content_tree_url, video_id)['content_tree']
+ metadata = content_tree[list(content_tree)[0]]
+ embed_code = metadata['embed_code']
+ pcode = metadata.get('asset_pcode') or embed_code
+ video_info = {
+ 'id': embed_code,
+ 'title': metadata['title'],
+ 'description': metadata.get('description'),
+ 'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
+ 'duration': float_or_none(metadata.get('duration'), 1000),
}
- def _extract(self, player_url, video_id):
- player = self._download_webpage(player_url, video_id)
- mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
- player, 'mobile player url')
- # Looks like some videos are only available for particular devices
- # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0
- # is only available for ipad)
- # Working around with fetching URLs for all the devices found starting with 'unknown'
- # until we succeed or eventually fail for each device.
- devices = re.findall(r'device\s*=\s*"([^"]+)";', player)
- devices.remove('unknown')
- devices.insert(0, 'unknown')
- for device in devices:
- mobile_player = self._download_webpage(
- '%s&device=%s' % (mobile_url, device), video_id,
- 'Downloading mobile player JS for %s device' % device)
- videos_info = self._search_regex(
- r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
- mobile_player, 'info', fatal=False, default=None)
- if videos_info:
- break
-
- if not videos_info:
- formats = []
+ urls = []
+ formats = []
+ for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'):
auth_data = self._download_json(
- 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?domain=www.example.org&supportedFormats=mp4,webm' % (video_id, video_id),
- video_id)
-
- cur_auth_data = auth_data['authorization_data'][video_id]
-
- for stream in cur_auth_data['streams']:
- formats.append({
- 'url': base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8'),
- 'ext': stream.get('delivery_type'),
- 'format': stream.get('video_codec'),
- 'format_id': stream.get('profile'),
- 'width': int_or_none(stream.get('width')),
- 'height': int_or_none(stream.get('height')),
- 'abr': int_or_none(stream.get('audio_bitrate')),
- 'vbr': int_or_none(stream.get('video_bitrate')),
- })
- if formats:
- return {
- 'id': video_id,
- 'formats': formats,
- 'title': 'Ooyala video',
- }
-
- if not cur_auth_data['authorized']:
- raise ExtractorError(cur_auth_data['message'], expected=True)
-
- if not videos_info:
- raise ExtractorError('Unable to extract info')
- videos_info = videos_info.replace('\\"', '"')
- videos_more_info = self._search_regex(
- r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"')
- videos_info = json.loads(videos_info)
- videos_more_info = json.loads(videos_more_info)
-
- if videos_more_info.get('lineup'):
- videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])]
- return {
- '_type': 'playlist',
- 'id': video_id,
- 'title': unescapeHTML(videos_more_info['title']),
- 'entries': videos,
- }
- else:
- return self._extract_result(videos_info[0], videos_more_info)
+ self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
+ compat_urllib_parse.urlencode({
+ 'domain': domain,
+ 'supportedFormats': supported_format
+ }),
+ video_id, 'Downloading %s JSON' % supported_format)
+
+ cur_auth_data = auth_data['authorization_data'][embed_code]
+
+ if cur_auth_data['authorized']:
+ for stream in cur_auth_data['streams']:
+ url = base64.b64decode(
+ stream['url']['data'].encode('ascii')).decode('utf-8')
+ if url in urls:
+ continue
+ urls.append(url)
+ delivery_type = stream['delivery_type']
+ if delivery_type == 'hls' or '.m3u8' in url:
+ formats.extend(self._extract_m3u8_formats(
+ url, embed_code, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+ elif delivery_type == 'hds' or '.f4m' in url:
+ formats.extend(self._extract_f4m_formats(
+ url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False))
+ elif '.smil' in url:
+ formats.extend(self._extract_smil_formats(
+ url, embed_code, fatal=False))
+ else:
+ formats.append({
+ 'url': url,
+ 'ext': stream.get('delivery_type'),
+ 'vcodec': stream.get('video_codec'),
+ 'format_id': delivery_type,
+ 'width': int_or_none(stream.get('width')),
+ 'height': int_or_none(stream.get('height')),
+ 'abr': int_or_none(stream.get('audio_bitrate')),
+ 'vbr': int_or_none(stream.get('video_bitrate')),
+ 'fps': float_or_none(stream.get('framerate')),
+ })
+ else:
+ raise ExtractorError('%s said: %s' % (
+ self.IE_NAME, cur_auth_data['message']), expected=True)
+ self._sort_formats(formats)
+
+ video_info['formats'] = formats
+ return video_info
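import base64

# Stream URLs in the authorization_data payload arrive base64-encoded, hence
# the b64decode above before the per-delivery-type dispatch. A worked sample
# (the encoded value is a hypothetical m3u8 URL, not real Ooyala data):
data = 'aHR0cDovL2V4YW1wbGUub3JnL21hc3Rlci5tM3U4'
print(base64.b64decode(data.encode('ascii')).decode('utf-8'))
# -> http://example.org/master.m3u8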
class OoyalaIE(OoyalaBaseIE):
'ext': 'mp4',
'title': 'Explaining Data Recovery from Hard Drives and SSDs',
'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+ 'duration': 853.386,
},
}, {
# Only available for ipad
'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'ext': 'mp4',
'title': 'Simulation Overview - Levels of Simulation',
- 'description': '',
+ 'duration': 194.948,
},
},
{
'info_dict': {
'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'ext': 'mp4',
- 'title': 'Ooyala video',
+ 'title': 'Divide Tool Path.mp4',
+ 'duration': 204.405,
}
}
]
ie=cls.ie_key())
def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
embed_code = self._match_id(url)
- player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
- return self._extract(player_url, embed_code)
+ domain = smuggled_data.get('domain')
+ content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
+ return self._extract(content_tree_url, embed_code, domain)
class OoyalaExternalIE(OoyalaBaseIE):
.*?&pcode=
)
(?P<pcode>.+?)
- (&|$)
+ (?:&|$)
'''
_TEST = {
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
- 'description': '',
+ 'duration': 1302.0,
},
'params': {
# m3u8 download
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- partner_id = mobj.group('partner_id')
- video_id = mobj.group('id')
- pcode = mobj.group('pcode')
- player_url = 'http://player.ooyala.com/player.js?externalId=%s:%s&pcode=%s' % (partner_id, video_id, pcode)
- return self._extract(player_url, video_id)
+ partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups()
+ content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id)
+ return self._extract(content_tree_url, video_id)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ get_element_by_attribute,
+ qualities,
+ unescapeHTML,
+)
+
+
+class OraTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?ora\.tv/([^/]+/)*(?P<id>[^/\?#]+)'
+ _TEST = {
+ 'url': 'https://www.ora.tv/larrykingnow/2015/12/16/vine-youtube-stars-zach-king-king-bach-on-their-viral-videos-0_36jupg6090pq',
+ 'md5': 'fa33717591c631ec93b04b0e330df786',
+ 'info_dict': {
+ 'id': '50178',
+ 'ext': 'mp4',
+ 'title': 'Vine & YouTube Stars Zach King & King Bach On Their Viral Videos!',
+ 'description': 'md5:ebbc5b1424dd5dba7be7538148287ac1',
+ }
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ video_data = self._search_regex(
+ r'"(?:video|current)"\s*:\s*({[^}]+?})', webpage, 'current video')
+ m3u8_url = self._search_regex(
+ r'hls_stream"?\s*:\s*"([^"]+)', video_data, 'm3u8 url', None)
+ if m3u8_url:
+ formats = self._extract_m3u8_formats(
+ m3u8_url, display_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False)
+ # similar to GameSpotIE
+ m3u8_path = compat_urlparse.urlparse(m3u8_url).path
+ QUALITIES_RE = r'((,[a-z]+\d+)+,?)'
+ available_qualities = self._search_regex(
+ QUALITIES_RE, m3u8_path, 'qualities').strip(',').split(',')
+ http_path = m3u8_path[1:].split('/', 1)[1]
+ http_template = re.sub(QUALITIES_RE, r'%s', http_path)
+ http_template = http_template.replace('.csmil/master.m3u8', '')
+ http_template = compat_urlparse.urljoin(
+ 'http://videocdn-pmd.ora.tv/', http_template)
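+            # A sketch with hypothetical values: an HLS path such as
+            #   /i/show/,mobile400,hd720,.mp4.csmil/master.m3u8
+            # yields available_qualities ['mobile400', 'hd720'] and the
+            # template 'http://videocdn-pmd.ora.tv/show/%s.mp4'.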
+ preference = qualities(
+ ['mobile400', 'basic400', 'basic600', 'sd900', 'sd1200', 'sd1500', 'hd720', 'hd1080'])
+ for q in available_qualities:
+ formats.append({
+ 'url': http_template % q,
+ 'format_id': q,
+ 'preference': preference(q),
+ })
+ self._sort_formats(formats)
+ else:
+ return self.url_result(self._search_regex(
+ r'"youtube_id"\s*:\s*"([^"]+)', webpage, 'youtube id'), 'Youtube')
+
+ return {
+ 'id': self._search_regex(
+ r'"id"\s*:\s*(\d+)', video_data, 'video id', default=display_id),
+ 'display_id': display_id,
+ 'title': unescapeHTML(self._og_search_title(webpage)),
+ 'description': get_element_by_attribute(
+ 'class', 'video_txt_decription', webpage),
+ 'thumbnail': self._proto_relative_url(self._search_regex(
+ r'"thumb"\s*:\s*"([^"]+)', video_data, 'thumbnail', None)),
+ 'formats': formats,
+ }
% geo_str),
fatal=False)
+ self._check_formats(formats, video_id)
self._sort_formats(formats)
upload_date = unified_strdate(sd['created_date'])
class ORFFM4IE(InfoExtractor):
IE_NAME = 'orf:fm4'
IE_DESC = 'radio FM4'
- _VALID_URL = r'http://fm4\.orf\.at/7tage/?#(?P<date>[0-9]+)/(?P<show>\w+)'
+ _VALID_URL = r'http://fm4\.orf\.at/(?:7tage/?#|player/)(?P<date>[0-9]+)/(?P<show>\w+)'
+
+ _TEST = {
+ 'url': 'http://fm4.orf.at/player/20160110/IS/',
+ 'md5': '01e736e8f1cef7e13246e880a59ad298',
+ 'info_dict': {
+ 'id': '2016-01-10_2100_tl_54_7DaysSun13_11244',
+ 'ext': 'mp3',
+ 'title': 'Im Sumpf',
+ 'description': 'md5:384c543f866c4e422a55f66a62d669cd',
+ 'duration': 7173,
+ 'timestamp': 1452456073,
+ 'upload_date': '20160110',
+ },
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+ compat_urlparse,
+)
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ parse_duration,
+ str_to_int,
+)
+
+
+class PandoraTVIE(InfoExtractor):
+ IE_NAME = 'pandora.tv'
+ IE_DESC = '판도라TV'
+ _VALID_URL = r'https?://(?:.+?\.)?channel\.pandora\.tv/channel/video\.ptv\?'
+ _TEST = {
+ 'url': 'http://jp.channel.pandora.tv/channel/video.ptv?c1=&prgid=53294230&ch_userid=mikakim&ref=main&lot=cate_01_2',
+ 'info_dict': {
+ 'id': '53294230',
+ 'ext': 'flv',
+ 'title': '頭を撫でてくれる?',
+ 'description': '頭を撫でてくれる?',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 39,
+ 'upload_date': '20151218',
+ 'uploader': 'カワイイ動物まとめ',
+ 'uploader_id': 'mikakim',
+ 'view_count': int,
+ 'like_count': int,
+ }
+ }
+
+ def _real_extract(self, url):
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ video_id = qs.get('prgid', [None])[0]
+ user_id = qs.get('ch_userid', [None])[0]
+ if any(not f for f in (video_id, user_id,)):
+ raise ExtractorError('Invalid URL', expected=True)
+
+ data = self._download_json(
+ 'http://m.pandora.tv/?c=view&m=viewJsonApi&ch_userid=%s&prgid=%s'
+ % (user_id, video_id), video_id)
+
+ info = data['data']['rows']['vod_play_info']['result']
+
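+        # vod_play_info maps keys such as 'v720Url' to direct format URLs
+        # (key naming inferred from the regex below); entries without a
+        # height-encoded key are skipped.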
+ formats = []
+ for format_id, format_url in info.items():
+ if not format_url:
+ continue
+ height = self._search_regex(
+ r'^v(\d+)[Uu]rl$', format_id, 'height', default=None)
+ if not height:
+ continue
+ formats.append({
+ 'format_id': '%sp' % height,
+ 'url': format_url,
+ 'height': int(height),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': info['subject'],
+ 'description': info.get('body'),
+ 'thumbnail': info.get('thumbnail') or info.get('poster'),
+ 'duration': float_or_none(info.get('runtime'), 1000) or parse_duration(info.get('time')),
+ 'upload_date': info['fid'][:8] if isinstance(info.get('fid'), compat_str) else None,
+ 'uploader': info.get('nickname'),
+ 'uploader_id': info.get('upload_userid'),
+ 'view_count': str_to_int(info.get('hit')),
+ 'like_count': str_to_int(info.get('likecnt')),
+ 'formats': formats,
+ }
import re
from .common import InfoExtractor
+from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
+ js_to_json,
strip_jsonp,
unified_strdate,
US_RATINGS,
class PBSIE(InfoExtractor):
+ _STATIONS = (
+ (r'(?:video|www|player)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/
+ (r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/
+ (r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/
+ (r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org
+ (r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org
+ (r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/
+ (r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org
+ (r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org
+ (r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/
+ (r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm
+ # (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/
+ # (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/
+ # (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/
+ (r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org
+ (r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/
+ (r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/
+ (r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/
+ (r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/
+ (r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/
+ (r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/
+ (r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv
+ (r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/
+ (r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/
+ (r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org
+ (r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/
+ (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/
+ (r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org
+ (r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org
+ (r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/
+ (r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/
+ (r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org
+ (r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/
+ (r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org
+ # (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org
+ # (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org
+ # (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org
+ (r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org
+ (r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org
+ (r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org
+ (r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org
+ (r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/
+ (r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/
+ (r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org
+ (r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org
+ (r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org
+ (r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/
+ # (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/
+ (r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/
+ (r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org
+ (r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org
+ (r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org
+ (r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/
+ (r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net
+ (r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org
+ (r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org
+ (r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/
+ # (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org
+ (r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org
+ (r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org
+ (r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org
+ (r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/
+ (r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/
+ (r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/
+ (r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org
+ (r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/
+ # (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/
+ (r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/
+ (r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org
+ (r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/
+ (r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org
+ (r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org
+ (r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/
+ (r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv
+ (r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/
+ # (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/
+ (r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/
+ (r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org
+ (r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/
+ (r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org
+ (r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org
+ (r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/
+ (r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/
+ (r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/
+ (r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/
+ (r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net
+ (r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org
+ (r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org
+ # (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/
+ (r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org
+ (r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/
+ (r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org
+ (r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org
+ (r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org
+ (r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/
+ (r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org
+ (r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org
+ (r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org
+ (r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org
+ (r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/
+ (r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/
+ (r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org
+ # (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org
+ # (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/
+ # (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/
+ (r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org
+ (r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org
+ (r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/
+ (r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/
+ (r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5
+ (r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/
+ (r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org
+ # (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org
+ (r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/
+ (r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/
+ (r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/
+ (r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/
+ (r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org
+ (r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org
+ (r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/
+ (r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/
+ (r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org
+ (r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/
+ (r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org
+ (r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/
+ (r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu
+ (r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/
+ (r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org
+ (r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org
+ # (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/
+ (r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/
+ (r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org
+ (r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org
+ (r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/
+        (r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # http://www.kcostv.org
+ (r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org
+ (r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/
+ (r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org
+ (r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org
+ (r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org
+ (r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org
+ # (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org
+ (r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/
+ (r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/
+ # (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org
+ (r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/
+ (r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/
+ (r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/
+ (r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org
+ (r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/
+ # (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu
+ # (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org
+ (r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org
+ (r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org
+ # (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org
+ # (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org
+ # (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org
+ (r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/
+ (r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/
+ (r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org
+ )
+
+ IE_NAME = 'pbs'
+ IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
+
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
- video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
+ (?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
(?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
- '''
+ ''' % '|'.join(list(zip(*_STATIONS))[0])
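+    # zip(*_STATIONS) transposes the (host regex, station name) pairs:
+    # index 0 feeds the host alternation above, index 1 the IE_DESC listing.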
_TESTS = [
{
'id': '2365006249',
'ext': 'mp4',
'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
- 'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
+ 'description': 'md5:36f341ae62e251b8f5bd2b754b95a071',
'duration': 3190,
},
'params': {
'id': '2365297690',
'ext': 'mp4',
'title': 'FRONTLINE - Losing Iraq',
- 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+ 'description': 'md5:4d3eaa01f94e61b3e73704735f1196d9',
'duration': 5050,
},
'params': {
'id': '2201174722',
'ext': 'mp4',
'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
- 'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
+ 'description': 'md5:95a19f568689d09a166dff9edada3301',
'duration': 801,
},
},
'info_dict': {
'id': '2365297708',
'ext': 'mp4',
- 'description': 'md5:68d87ef760660eb564455eb30ca464fe',
'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
+ 'description': 'md5:657897370e09e2bc6bf0f8d2cd313c6b',
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
},
'display_id': 'player',
'ext': 'mp4',
'title': 'American Experience - Death and the Civil War, Chapter 1',
- 'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
+ 'description': 'md5:1b80a74e0380ed2a4fb335026de1600d',
'duration': 682,
'thumbnail': 're:^https?://.*\.jpg$',
},
},
},
{
- 'url': 'http://video.pbs.org/video/2365367186/',
+ 'url': 'http://www.pbs.org/video/2365245528/',
'info_dict': {
- 'id': '2365367186',
- 'display_id': '2365367186',
+ 'id': '2365245528',
+ 'display_id': '2365245528',
'ext': 'mp4',
- 'title': 'To Catch A Comet - Full Episode',
- 'description': 'On November 12, 2014, billions of kilometers from Earth, spacecraft orbiter Rosetta and lander Philae did what no other had dared to attempt \u2014 land on the volatile surface of a comet as it zooms around the sun at 67,000 km/hr. The European Space Agency hopes this mission can help peer into our past and unlock secrets of our origins.',
- 'duration': 3342,
+ 'title': 'FRONTLINE - United States of Secrets (Part One)',
+ 'description': 'md5:55756bd5c551519cc4b7703e373e217e',
+ 'duration': 6851,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True, # requires ffmpeg
},
- 'skip': 'Expired',
},
{
# Video embedded in iframe containing angle brackets as attribute's value (e.g.
'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
'ext': 'mp4',
'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
- 'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
+ 'description': 'md5:54033c6baa1f9623607c6e2ed245888b',
'duration': 1480,
'thumbnail': 're:^https?://.*\.jpg$',
},
'display_id': 'the-atomic-artists',
'ext': 'mp4',
'title': 'FRONTLINE - The Atomic Artists',
- 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+ 'description': 'md5:1a2481e86b32b2e12ec1905dd473e2c1',
'duration': 723,
'thumbnail': 're:^https?://.*\.jpg$',
},
'skip_download': True, # requires ffmpeg
},
},
+ {
+            # Serves HD only via the widget/partnerplayer page
+ 'url': 'http://www.pbs.org/video/2365641075/',
+ 'info_dict': {
+ 'id': '2365641075',
+ 'ext': 'mp4',
+ 'title': 'FRONTLINE - Netanyahu at War',
+ 'duration': 6852,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'formats': 'mincount:8',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
{
'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
'only_matching': True,
+ },
+ {
+ 'url': 'http://watch.knpb.org/video/2365616055/',
+ 'only_matching': True,
}
]
_ERRORS = {
webpage, 'upload date', default=None))
# tabbed frontline videos
- tabbed_videos = re.findall(
- r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"', webpage)
- if tabbed_videos:
- return tabbed_videos, presumptive_id, upload_date
+ MULTI_PART_REGEXES = (
+ r'<div[^>]+class="videotab[^"]*"[^>]+vid="(\d+)"',
+ r'<a[^>]+href=["\']#video-\d+["\'][^>]+data-coveid=["\'](\d+)',
+ )
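+        # Multi-part (tabbed) pages expose several clip ids, which the
+        # caller turns into a playlist.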
+ for p in MULTI_PART_REGEXES:
+ tabbed_videos = re.findall(p, webpage)
+ if tabbed_videos:
+ return tabbed_videos, presumptive_id, upload_date
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
+ r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
]
for vid_id in video_id]
return self.playlist_result(entries, display_id)
- info = self._download_json(
- 'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
- display_id)
+ info = None
+ redirects = []
+ redirect_urls = set()
+
+ def extract_redirect_urls(info):
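+            # Both the videoInfo API and the player pages below may report
+            # the same encodings; collect each redirect URL only once.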
+ for encoding_name in ('recommended_encoding', 'alternate_encoding'):
+ redirect = info.get(encoding_name)
+ if not redirect:
+ continue
+ redirect_url = redirect.get('url')
+ if redirect_url and redirect_url not in redirect_urls:
+ redirects.append(redirect)
+ redirect_urls.add(redirect_url)
+
+ try:
+ video_info = self._download_json(
+ 'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
+ display_id, 'Downloading video info JSON')
+ extract_redirect_urls(video_info)
+ info = video_info
+ except ExtractorError as e:
+ # videoInfo API may not work for some videos
+ if not isinstance(e.cause, compat_HTTPError) or e.cause.code != 404:
+ raise
+
+ # Player pages may also serve different qualities
+ for page in ('widget/partnerplayer', 'portalplayer'):
+ player = self._download_webpage(
+ 'http://player.pbs.org/%s/%s' % (page, video_id),
+ display_id, 'Downloading %s page' % page, fatal=False)
+ if player:
+ video_info = self._parse_json(
+ self._search_regex(
+ r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
+ player, '%s video data' % page, default='{}'),
+ display_id, transform_source=js_to_json, fatal=False)
+ if video_info:
+ extract_redirect_urls(video_info)
+ if not info:
+ info = video_info
formats = []
- for encoding_name in ('recommended_encoding', 'alternate_encoding'):
- redirect = info.get(encoding_name)
- if not redirect:
- continue
- redirect_url = redirect.get('url')
- if not redirect_url:
- continue
+ for num, redirect in enumerate(redirects):
+ redirect_id = redirect.get('eeid')
redirect_info = self._download_json(
- redirect_url + '?format=json', display_id,
- 'Downloading %s video url info' % encoding_name)
+ '%s?format=json' % redirect['url'], display_id,
+ 'Downloading %s video url info' % (redirect_id or num))
if redirect_info['status'] == 'error':
raise ExtractorError(
else:
formats.append({
'url': format_url,
- 'format_id': redirect.get('eeid'),
+ 'format_id': redirect_id,
})
+ self._remove_duplicate_formats(formats)
self._sort_formats(formats)
rating_str = info.get('rating')
'id': video_id,
'display_id': display_id,
'title': info['title'],
- 'description': info['program'].get('description'),
+ 'description': info.get('description') or info.get('program', {}).get('description'),
'thumbnail': info.get('image_url'),
'duration': int_or_none(info.get('duration')),
'age_limit': age_limit,
}]
def _call_api(self, method, value):
- attribute = 'token' if len(value) > 13 else 'broadcast_id'
return self._download_json(
- 'https://api.periscope.tv/api/v2/%s?%s=%s' % (method, attribute, value), value)
+ 'https://api.periscope.tv/api/v2/%s?broadcast_id=%s' % (method, value), value)
def _real_extract(self, url):
token = self._match_id(url)
from __future__ import unicode_literals
-from .common import InfoExtractor
-from .zdf import extract_from_xml_url
+from .zdf import ZDFIE
-class PhoenixIE(InfoExtractor):
+class PhoenixIE(ZDFIE):
+ IE_NAME = 'phoenix.de'
_VALID_URL = r'''(?x)https?://(?:www\.)?phoenix\.de/content/
(?:
phoenix/die_sendungen/(?:[^/]+/)?
r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
webpage, 'internal video ID')
- api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
- return extract_from_xml_url(self, video_id, api_url)
+ api_url = 'http://www.phoenix.de/php/mediaplayer/data/beitrags_details.php?ak=web&id=%s' % internal_id
+ return self.extract_from_xml_url(video_id, api_url)
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
ExtractorError,
'only_matching': True,
}]
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<iframe[^>]+src="(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)"', webpage)
+ if mobj:
+ return mobj.group('url')
+
def _real_extract(self, url):
video_id = self._match_id(url)
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class PlaysTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?plays\.tv/video/(?P<id>[0-9a-f]{18})'
+ _TEST = {
+ 'url': 'http://plays.tv/video/56af17f56c95335490/when-you-outplay-the-azir-wall',
+ 'md5': 'dfeac1198506652b5257a62762cec7bc',
+ 'info_dict': {
+ 'id': '56af17f56c95335490',
+ 'ext': 'mp4',
+ 'title': 'When you outplay the Azir wall',
+ 'description': 'Posted by Bjergsen',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._og_search_title(webpage)
+ content = self._parse_json(
+ self._search_regex(
+ r'R\.bindContent\(({.+?})\);', webpage,
+ 'content'), video_id)['content']
+ mpd_url, sources = re.search(
+ r'(?s)<video[^>]+data-mpd="([^"]+)"[^>]*>(.+?)</video>',
+ content).groups()
+ formats = self._extract_mpd_formats(
+ self._proto_relative_url(mpd_url), video_id, mpd_id='DASH')
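+        # Progressive HTTP fallbacks appear as <source res="720h" src="...">
+        # children of the same <video> tag (res naming inferred from the
+        # regex below).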
+ for format_id, height, format_url in re.findall(r'<source\s+res="((\d+)h?)"\s+src="([^"]+)"', sources):
+ formats.append({
+ 'url': self._proto_relative_url(format_url),
+ 'format_id': 'http-' + format_id,
+ 'height': int_or_none(height),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': self._og_search_description(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'formats': formats,
+ }
from __future__ import unicode_literals
+import re
import json
import random
import collections
ExtractorError,
int_or_none,
parse_duration,
+ qualities,
sanitized_Request,
)
'low': {'width': 640, 'height': 480},
'medium': {'width': 848, 'height': 640},
'high': {'width': 1024, 'height': 768},
+ 'high-widescreen': {'width': 1280, 'height': 720},
}
+ QUALITIES_PREFERENCE = ('low', 'medium', 'high', 'high-widescreen',)
+ quality_key = qualities(QUALITIES_PREFERENCE)
+
AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities'])
ALLOWED_QUALITIES = (
- AllowedQuality('webm', ('high',)),
- AllowedQuality('mp4', ('low', 'medium', 'high',)),
+ AllowedQuality('webm', ['high', ]),
+ AllowedQuality('mp4', ['low', 'medium', 'high', ]),
)
+ # Some courses also offer widescreen resolution for high quality (see
+ # https://github.com/rg3/youtube-dl/issues/7766)
+        widescreen = bool(re.search(
+            r'courseSupportsWidescreenVideoFormats\s*:\s*true', webpage))
+ best_quality = 'high-widescreen' if widescreen else 'high'
+ if widescreen:
+ for allowed_quality in ALLOWED_QUALITIES:
+ allowed_quality.qualities.append(best_quality)
+
# In order to minimize the number of calls to the ViewClip API and reduce
# the probability of being throttled or banned by Pluralsight, we request
# only a single format until a formats listing is explicitly requested.
else:
def guess_allowed_qualities():
req_format = self._downloader.params.get('format') or 'best'
- req_format_split = req_format.split('-')
+ req_format_split = req_format.split('-', 1)
if len(req_format_split) > 1:
req_ext, req_quality = req_format_split
for allowed_quality in ALLOWED_QUALITIES:
if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities:
return (AllowedQuality(req_ext, (req_quality, )), )
req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4'
- return (AllowedQuality(req_ext, ('high', )), )
+ return (AllowedQuality(req_ext, (best_quality, )), )
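+        # For example, a requested format of 'mp4-high' splits into
+        # ('mp4', 'high') and is honoured as a single AllowedQuality, while
+        # the default 'best' falls through to the preferred extension at
+        # best_quality.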
allowed_qualities = guess_allowed_qualities()
formats = []
- for ext, qualities in allowed_qualities:
- for quality in qualities:
+ for ext, qualities_ in allowed_qualities:
+ for quality in qualities_:
f = QUALITIES[quality].copy()
clip_post = {
'a': author,
'url': clip_url,
'ext': ext,
'format_id': format_id,
+ 'quality': quality_key(quality),
})
formats.append(f)
self._sort_formats(formats)
# { a = author, cn = clip_id, lc = end, m = name }
return {
- 'id': clip['clipName'],
+ 'id': clip.get('clipName') or clip['name'],
'title': '%s - %s' % (module['title'], clip['title']),
'duration': int_or_none(clip.get('duration')) or parse_duration(clip.get('formattedDuration')),
'creator': author,
)
from ..utils import (
ExtractorError,
+ int_or_none,
sanitized_Request,
str_to_int,
)
_VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
- 'md5': '882f488fa1f0026f023f33576004a2ed',
+ 'md5': '1e19b41231a02eba417839222ac9d58e',
'info_dict': {
'id': '648719015',
'ext': 'mp4',
- "uploader": "Babes",
- "title": "Seductive Indian beauty strips down and fingers her pink pussy",
- "age_limit": 18
+ 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
+ 'uploader': 'Babes',
+ 'duration': 361,
+ 'view_count': int,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ 'age_limit': 18,
}
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'PornHub said: %s' % error_msg,
expected=True, video_id=video_id)
- video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
+ flashvars = self._parse_json(
+ self._search_regex(
+                r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
+ video_id)
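+        # Prefer metadata from the player flashvars when present; otherwise
+        # fall back to scraping the page markup below.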
+ if flashvars:
+ video_title = flashvars.get('video_title')
+ thumbnail = flashvars.get('image_url')
+ duration = int_or_none(flashvars.get('video_duration'))
+ else:
+ video_title, thumbnail, duration = [None] * 3
+
+ if not video_title:
+ video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
+
video_uploader = self._html_search_regex(
r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False)
- thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
- if thumbnail:
- thumbnail = compat_urllib_parse_unquote(thumbnail)
view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
'uploader': video_uploader,
'title': video_title,
'thumbnail': thumbnail,
+ 'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
}
-class PornHubPlaylistIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
- _TESTS = [{
- 'url': 'http://www.pornhub.com/playlist/6201671',
- 'info_dict': {
- 'id': '6201671',
- 'title': 'P0p4',
- },
- 'playlist_mincount': 35,
- }]
+class PornHubPlaylistBaseIE(InfoExtractor):
+ def _extract_entries(self, webpage):
+ return [
+ self.url_result('http://www.pornhub.com/%s' % video_url, PornHubIE.ie_key())
+ for video_url in set(re.findall(
+ r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage))
+ ]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
- entries = [
- self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
- for video_url in set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
- ]
+ entries = self._extract_entries(webpage)
playlist = self._parse_json(
self._search_regex(
return self.playlist_result(
entries, playlist_id, playlist.get('title'), playlist.get('description'))
+
+
+class PornHubPlaylistIE(PornHubPlaylistBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
+ _TESTS = [{
+ 'url': 'http://www.pornhub.com/playlist/6201671',
+ 'info_dict': {
+ 'id': '6201671',
+ 'title': 'P0p4',
+ },
+ 'playlist_mincount': 35,
+ }]
+
+
+class PornHubUserVideosIE(PornHubPlaylistBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
+ _TESTS = [{
+ 'url': 'http://www.pornhub.com/users/rushandlia/videos',
+ 'info_dict': {
+ 'id': 'rushandlia',
+ },
+ 'playlist_mincount': 13,
+ }]
+
+ def _real_extract(self, url):
+ user_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, user_id)
+
+ return self.playlist_result(self._extract_entries(webpage), user_id)
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
r'<article id="descriptif">(.+?)</article>',
- webpage, "description", fatal=False, flags=re.DOTALL)
+ webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
class ProSiebenSat1IE(InfoExtractor):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
- _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany|7tv)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)'
_TESTS = [
{
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
- # rtmp download
'skip_download': True,
},
},
'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
'info_dict': {
'id': '2156342',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Kurztrips zum Valentinstag',
- 'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
+ 'description': 'Romantischer Kurztrip zum Valentinstag? Nina Heinemann verrät, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
- # rtmp download
'skip_download': True,
},
},
},
'playlist_count': 2,
},
+ {
+ 'url': 'http://www.7tv.de/circus-halligalli/615-best-of-circus-halligalli-ganze-folge',
+ 'info_dict': {
+ 'id': '4187506',
+ 'ext': 'flv',
+ 'title': 'Best of Circus HalliGalli',
+ 'description': 'md5:8849752efd90b9772c9db6fdf87fb9e9',
+ 'upload_date': '20151229',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
]
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
r'clip[iI]d=(\d+)',
+ r'clip[iI]d\s*=\s*["\'](\d+)',
r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
]
_TITLE_REGEXES = [
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<h1 class="att-name">\s*(.+?)</h1>',
r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
+ r'<h2 class="video-title" itemprop="name">\s*(.+?)</h2>',
+ r'<div[^>]+id="veeseoTitle"[^>]*>(.+?)</div>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p class="att-description">\s*(.+?)\s*</p>',
+ r'<p class="video-description" itemprop="description">\s*(.+?)</p>',
+ r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
strip_jsonp,
unescapeHTML,
clean_html,
+ ExtractorError,
)
'info_dict': {
'id': '001BLpXF2DyJe2',
'title': '林俊杰',
- 'description': 'md5:2a222d89ba4455a3af19940c0481bb78',
+ 'description': 'md5:870ec08f7d8547c29c93010899103751',
},
'playlist_count': 12,
}
'url': 'http://y.qq.com/#type=toplist&p=top_3',
'info_dict': {
'id': 'top_3',
- 'title': 'QQ音乐巅峰榜·欧美',
+ 'title': '巅峰榜·欧美',
'description': 'QQ音乐巅峰榜·欧美根据用户收听行为自动生成,集结当下最流行的欧美新歌!:更新时间:每周四22点|统'
'计周期:一周(上周四至本周三)|统计对象:三个月内发行的欧美歌曲|统计数量:100首|统计算法:根据'
'歌曲在一周内的有效播放次数,由高到低取前100名(同一歌手最多允许5首歌曲同时上榜)|有效播放次数:'
IE_DESC = 'QQ音乐 - 歌单'
_VALID_URL = r'http://y\.qq\.com/#type=taoge&id=(?P<id>[0-9]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://y.qq.com/#type=taoge&id=3462654915',
'info_dict': {
'id': '3462654915',
'description': 'md5:d2c9d758a96b9888cf4fe82f603121d4',
},
'playlist_count': 40,
- }
+ 'skip': 'playlist gone',
+ }, {
+ 'url': 'http://y.qq.com/#type=taoge&id=1374105607',
+ 'info_dict': {
+ 'id': '1374105607',
+ 'title': '易入人心的华语民谣',
+ 'description': '民谣的歌曲易于传唱、、歌词朗朗伤口、旋律简单温馨。属于那种才入耳孔。却上心头的感觉。没有太多的复杂情绪。简单而直接地表达乐者的情绪,就是这样的简单才易入人心。',
+ },
+ 'playlist_count': 20,
+ }]
def _real_extract(self, url):
list_id = self._match_id(url)
list_json = self._download_json(
'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0&disstid=%s'
% list_id, list_id, 'Download list page',
- transform_source=strip_jsonp)['cdlist'][0]
-
+ transform_source=strip_jsonp)
+        if not list_json.get('cdlist'):
+ if list_json.get('code'):
+ raise ExtractorError(
+ 'QQ Music said: error %d in fetching playlist info' % list_json['code'],
+ expected=True)
+ raise ExtractorError('Unable to get playlist info')
+
+ cdlist = list_json['cdlist'][0]
entries = [
self.url_result(
'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid']
- ) for song in list_json['songlist']
+ ) for song in cdlist['songlist']
]
- list_name = list_json.get('dissname')
- list_description = clean_html(unescapeHTML(list_json.get('desc')))
+ list_name = cdlist.get('dissname')
+ list_description = clean_html(unescapeHTML(cdlist.get('desc')))
return self.playlist_result(entries, list_id, list_name, list_description)
def _real_extract(self, url):
video_id = self._match_id(url)
- meta_url = "http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s" % video_id
+ meta_url = 'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id
meta_doc = self._download_webpage(
meta_url, video_id, 'Downloading metadata')
title = self._html_search_regex(
- r"<h1.*>(?P<title>.+)</h1>", meta_doc, "title")
+ r'<h1.*>(?P<title>.+)</h1>', meta_doc, 'title')
description = self._html_search_regex(
- r"<p>(?P<description>.*)</p>", meta_doc, "description", fatal=False)
+ r'<p>(?P<description>.*)</p>', meta_doc, 'description', fatal=False)
duration = parse_duration(self._html_search_regex(
- r"Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>",
- meta_doc, "duration", fatal=False))
+ r'Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>',
+ meta_doc, 'duration', fatal=False))
page_doc = self._download_webpage(
url, video_id, 'Downloading video information')
formats = [{
'url': video_url,
'ext': 'mp4',
- 'width': int(mobj.group("width")),
+ 'width': int(mobj.group('width')),
}]
return {
'id': video_id,
'info_dict': {
'id': 'one-one',
'ext': 'ogg',
- "title": "One to one",
- "description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
- "uploader": "Thomas Hercouët",
+ 'title': 'One to one',
+ 'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
+ 'uploader': 'Thomas Hercouët',
},
}
compat_urlparse,
)
from ..utils import (
+ ExtractorError,
+ determine_ext,
parse_duration,
unified_strdate,
+ int_or_none,
+ xpath_text,
)
-class RaiIE(InfoExtractor):
- _VALID_URL = r'(?P<url>(?P<host>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it))/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)'
+class RaiTVIE(InfoExtractor):
+ _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/(?:[^/]+/)+media/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html'
_TESTS = [
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
- 'md5': 'c064c0b2d09c278fb293116ef5d0a32d',
+ 'md5': '96382709b61dd64a6b88e0f791e6df4c',
'info_dict': {
'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Report del 07/04/2014',
'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
'upload_date': '20140407',
},
{
'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
- 'md5': '8bb9c151924ce241b74dd52ef29ceafa',
+ 'md5': 'd9751b78eac9710d62c2447b224dea39',
'info_dict': {
'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'TG PRIMO TEMPO',
- 'description': '',
'upload_date': '20140612',
'duration': 1758,
},
- 'skip': 'Error 404',
},
{
'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
},
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
- 'md5': '35694f062977fe6619943f08ed935730',
'info_dict': {
'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
'ext': 'mp4',
'title': 'Alluvione in Sardegna e dissesto idrogeologico',
'description': 'Edizione delle ore 20:30 ',
- }
+ },
+ 'skip': 'invalid urls',
},
{
'url': 'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html',
- 'md5': '02b64456f7cc09f96ff14e7dd489017e',
+ 'md5': '496ab63e420574447f70d02578333437',
'info_dict': {
'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6',
'ext': 'flv',
'title': 'Il Candidato - Primo episodio: "Le Primarie"',
- 'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!',
- 'uploader': 'RaiTre',
+ 'description': 'md5:364b604f7db50594678f483353164fb8',
+ 'upload_date': '20140923',
+ 'duration': 386,
}
},
- {
- 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
- 'md5': '037104d2c14132887e5e4cf114569214',
- 'info_dict': {
- 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e',
- 'ext': 'flv',
- 'title': 'Il pacco',
- 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
- 'uploader': 'RaiTre',
- 'upload_date': '20141221',
- },
- }
]
- def _extract_relinker_url(self, webpage):
- return self._proto_relative_url(self._search_regex(
- [r'name="videourl" content="([^"]+)"', r'var\s+videoURL(?:_MP4)?\s*=\s*"([^"]+)"'],
- webpage, 'relinker url', default=None))
-
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- host = mobj.group('host')
+ video_id = self._match_id(url)
+ media = self._download_json(
+ 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % video_id,
+ video_id, 'Downloading video JSON')
- webpage = self._download_webpage(url, video_id)
+ thumbnails = []
+ for image_type in ('image', 'image_medium', 'image_300'):
+ thumbnail_url = media.get(image_type)
+ if thumbnail_url:
+ thumbnails.append({
+ 'url': thumbnail_url,
+ })
- relinker_url = self._extract_relinker_url(webpage)
-
- if not relinker_url:
- iframe_url = self._search_regex(
- [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"',
- r'drawMediaRaiTV\(["\'](.+?)["\']'],
- webpage, 'iframe')
- if not iframe_url.startswith('http'):
- iframe_url = compat_urlparse.urljoin(url, iframe_url)
- webpage = self._download_webpage(
- iframe_url, video_id)
- relinker_url = self._extract_relinker_url(webpage)
-
- relinker = self._download_json(
- '%s&output=47' % relinker_url, video_id)
-
- media_url = relinker['video'][0]
- ct = relinker.get('ct')
- if ct == 'f4m':
- formats = self._extract_f4m_formats(
- media_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id)
- else:
- formats = [{
- 'url': media_url,
- 'format_id': ct,
- }]
+ subtitles = []
+ formats = []
+ media_type = media['type']
+ if 'Audio' in media_type:
+ formats.append({
+ 'format_id': media.get('formatoAudio'),
+ 'url': media['audioUrl'],
+ 'ext': media.get('formatoAudio'),
+ })
+ elif 'Video' in media_type:
+ def fix_xml(xml):
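+                # The relinker XML is malformed: a stray ' tag elementi'
+                # marker and closing tags written as '>/' instead of '</'
+                # (the quirks stripped below).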
+ return xml.replace(' tag elementi', '').replace('>/', '</')
+
+ relinker = self._download_xml(
+ media['mediaUri'] + '&output=43',
+ video_id, transform_source=fix_xml)
- json_link = self._html_search_meta(
- 'jsonlink', webpage, 'JSON link', default=None)
- if json_link:
- media = self._download_json(
- host + json_link, video_id, 'Downloading video JSON')
- title = media.get('name')
- description = media.get('desc')
- thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image')
- duration = parse_duration(media.get('length'))
- uploader = media.get('author')
- upload_date = unified_strdate(media.get('date'))
+ has_subtitle = False
+
+ for element in relinker.findall('element'):
+ media_url = xpath_text(element, 'url')
+ ext = determine_ext(media_url)
+ content_type = xpath_text(element, 'content-type')
+ if ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+ elif ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ media_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
+ video_id, f4m_id='hds', fatal=False))
+ elif ext == 'stl':
+ has_subtitle = True
+                elif content_type and content_type.startswith('video/'):
+                    bitrate = int_or_none(xpath_text(element, 'bitrate'))
+                    formats.append({
+                        'url': media_url,
+                        'tbr': bitrate or None,
+                        'format_id': 'http-%d' % bitrate if bitrate else 'http',
+                    })
+                elif content_type and content_type.startswith('image/'):
+ thumbnails.append({
+ 'url': media_url,
+ })
+
+ self._sort_formats(formats)
+
+ if has_subtitle:
+ webpage = self._download_webpage(url, video_id)
+ subtitles = self._get_subtitles(video_id, webpage)
else:
- title = (self._search_regex(
- r'var\s+videoTitolo\s*=\s*"(.+?)";',
- webpage, 'title', default=None) or self._og_search_title(webpage)).replace('\\"', '"')
- description = self._og_search_description(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
- duration = None
- uploader = self._html_search_meta('Editore', webpage, 'uploader')
- upload_date = unified_strdate(self._html_search_meta(
- 'item-date', webpage, 'upload date', default=None))
-
- subtitles = self.extract_subtitles(video_id, webpage)
+ raise ExtractorError('not a media file')
return {
'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'upload_date': upload_date,
- 'duration': duration,
+ 'title': media['name'],
+ 'description': media.get('desc'),
+ 'thumbnails': thumbnails,
+ 'uploader': media.get('author'),
+ 'upload_date': unified_strdate(media.get('date')),
+ 'duration': parse_duration(media.get('length')),
'formats': formats,
'subtitles': subtitles,
}
'url': 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions),
}]
return subtitles
+
+
+class RaiIE(InfoExtractor):
+ _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html'
+ _TESTS = [
+ {
+ 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
+ 'md5': 'e0e7a8a131e249d1aa0ebf270d1d8db7',
+ 'info_dict': {
+ 'id': '59d69d28-6bb6-409d-a4b5-ed44096560af',
+ 'ext': 'flv',
+ 'title': 'Il pacco',
+ 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
+ 'upload_date': '20141221',
+ },
+ }
+ ]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if RaiTVIE.suitable(url) else super(RaiIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ iframe_url = self._search_regex(
+ [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"',
+ r'drawMediaRaiTV\(["\'](.+?)["\']'],
+ webpage, 'iframe')
+ if not iframe_url.startswith('http'):
+ iframe_url = compat_urlparse.urljoin(url, iframe_url)
+ return self.url_result(iframe_url)
'info_dict': {
'id': 'ford-lopatin-live-at-primavera-sound-2011',
'ext': 'mp3',
- "uploader_id": "ford-lopatin",
- "location": "Spain",
- "description": "Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
- "uploader": "Ford & Lopatin",
- "title": "Live at Primavera Sound 2011",
+ 'uploader_id': 'ford-lopatin',
+ 'location': 'Spain',
+ 'description': 'Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.',
+ 'uploader': 'Ford & Lopatin',
+ 'title': 'Live at Primavera Sound 2011',
},
}
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+from ..utils import (
+ sanitized_Request,
+ xpath_text,
+ xpath_with_ns,
+)
+
+
+class RegioTVIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?regio-tv\.de/video/(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://www.regio-tv.de/video/395808.html',
+ 'info_dict': {
+ 'id': '395808',
+ 'ext': 'mp4',
+ 'title': 'Wir in Ludwigsburg',
+ 'description': 'Mit unseren zuckersüßen Adventskindern, außerdem besuchen wir die Abendsterne!',
+ }
+ }, {
+ 'url': 'http://www.regio-tv.de/video/395808',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ key = self._search_regex(
+ r'key\s*:\s*(["\'])(?P<key>.+?)\1', webpage, 'key', group='key')
+ title = self._og_search_title(webpage)
+
+ SOAP_TEMPLATE = '<?xml version="1.0" encoding="utf-8"?><soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"><soap:Body><{0} xmlns="http://v.telvi.de/"><key xsi:type="xsd:string">{1}</key></{0}></soap:Body></soap:Envelope>'
+
+ request = sanitized_Request(
+ 'http://v.telvi.de/',
+ SOAP_TEMPLATE.format('GetHTML5VideoData', key).encode('utf-8'))
+ video_data = self._download_xml(request, video_id, 'Downloading video XML')
+
+ NS_MAP = {
+ 'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
+ 'soap': 'http://schemas.xmlsoap.org/soap/envelope/',
+ }
+
+ video_url = xpath_text(
+ video_data, xpath_with_ns('.//video', NS_MAP), 'video url', fatal=True)
+ thumbnail = xpath_text(
+ video_data, xpath_with_ns('.//image', NS_MAP), 'thumbnail')
+ description = self._og_search_description(
+ webpage) or self._html_search_meta('description', webpage)
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ }
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
'info_dict': {
- "id": "16965047",
- "ext": "mp3",
- "title": "MONA LISA",
- "uploader": "ALKILADOS",
- "uploader_id": "216429",
- "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
+ 'id': '16965047',
+ 'ext': 'mp3',
+ 'title': 'MONA LISA',
+ 'uploader': 'ALKILADOS',
+ 'uploader_id': '216429',
+ 'thumbnail': 're:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$'
},
}]
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+ unescapeHTML,
+ qualities,
+)
+
+
+class Revision3IE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|testtube|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
+ _TESTS = [{
+ 'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
+ 'md5': 'd94a72d85d0a829766de4deb8daaf7df',
+ 'info_dict': {
+ 'id': '73034',
+ 'display_id': 'technobuffalo/5-google-predictions-for-2016',
+ 'ext': 'webm',
+ 'title': '5 Google Predictions for 2016',
+ 'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
+ 'upload_date': '20151228',
+ 'timestamp': 1451325600,
+ 'duration': 187,
+ 'uploader': 'TechnoBuffalo',
+ 'uploader_id': 'technobuffalo',
+ }
+ }, {
+ 'url': 'http://testtube.com/brainstuff',
+ 'info_dict': {
+ 'id': '251',
+ 'title': 'BrainStuff',
+ 'description': 'Whether the topic is popcorn or particle physics, you can count on the HowStuffWorks team to explore-and explain-the everyday science in the world around us on BrainStuff.',
+ },
+ 'playlist_mincount': 93,
+ }, {
+ 'url': 'https://testtube.com/dnews/5-weird-ways-plants-can-eat-animals?utm_source=FB&utm_medium=DNews&utm_campaign=DNewsSocial',
+ 'info_dict': {
+ 'id': '60163',
+ 'display_id': 'dnews/5-weird-ways-plants-can-eat-animals',
+ 'duration': 275,
+ 'ext': 'webm',
+ 'title': '5 Weird Ways Plants Can Eat Animals',
+ 'description': 'Why have some plants evolved to eat meat?',
+ 'upload_date': '20150120',
+ 'timestamp': 1421763300,
+ 'uploader': 'DNews',
+ 'uploader_id': 'dnews',
+ },
+ }]
+ _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
+ _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'
+
+ def _real_extract(self, url):
+ domain, display_id = re.match(self._VALID_URL, url).groups()
+ page_info = self._download_json(
+ self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)
+
+ if page_info['data']['type'] == 'episode':
+ episode_data = page_info['data']
+ video_id = compat_str(episode_data['video']['data']['id'])
+ video_data = self._download_json(
+ 'http://revision3.com/api/getPlaylist.json?api_key=%s&codecs=h264,vp8,theora&video_id=%s' % (self._API_KEY, video_id),
+ video_id)['items'][0]
+
+ formats = []
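+            # 'media' maps codec -> {quality_id -> {url, bitrate, ...}};
+            # the 'hls' pseudo-quality carries an m3u8 master playlist
+            # (structure inferred from this loop).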
+ for vcodec, media in video_data['media'].items():
+ for quality_id, quality in media.items():
+ if quality_id == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ quality['url'], video_id, 'mp4',
+ 'm3u8_native', m3u8_id='hls', fatal=False))
+ else:
+ formats.append({
+ 'url': quality['url'],
+ 'format_id': '%s-%s' % (vcodec, quality_id),
+ 'tbr': int_or_none(quality.get('bitrate')),
+ 'vcodec': vcodec,
+ })
+ self._sort_formats(formats)
+
+ preference = qualities(['mini', 'small', 'medium', 'large'])
+ thumbnails = [{
+ 'url': image_url,
+ 'id': image_id,
+ 'preference': preference(image_id)
+ } for image_id, image_url in video_data.get('images', {}).items()]
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': unescapeHTML(video_data['title']),
+ 'description': unescapeHTML(video_data.get('summary')),
+ 'timestamp': parse_iso8601(episode_data.get('publishTime'), ' '),
+ 'author': episode_data.get('author'),
+ 'uploader': video_data.get('show', {}).get('name'),
+ 'uploader_id': video_data.get('show', {}).get('slug'),
+ 'duration': int_or_none(video_data.get('duration')),
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
+ else:
+ show_data = page_info['show']['data']
+ episodes_data = page_info['episodes']['data']
+ num_episodes = page_info['meta']['totalEpisodes']
+ processed_episodes = 0
+ entries = []
+ page_num = 1
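+ # walk the paginated episode list; each following page is fetched by appending /<page_num> to the show path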
+ while True:
+ entries.extend([self.url_result(
+ 'http://%s/%s/%s' % (domain, display_id, episode['slug'])) for episode in episodes_data])
+ processed_episodes += len(episodes_data)
+ if processed_episodes == num_episodes:
+ break
+ page_num += 1
+ episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
+ domain, display_id + '/' + compat_str(page_num), domain),
+ display_id)['episodes']['data']
+
+ return self.playlist_result(
+ entries, compat_str(show_data['id']),
+ show_data.get('name'), show_data.get('summary'))
class RingTVIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
_TEST = {
- "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
- "md5": "d25945f5df41cdca2d2587165ac28720",
- "info_dict": {
+ 'url': 'http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30',
+ 'md5': 'd25945f5df41cdca2d2587165ac28720',
+ 'info_dict': {
'id': '857645',
'ext': 'mp4',
- "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
- "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
+ 'title': 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
+ 'description': 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
}
}
description = self._html_search_regex(
r'addthis:description="([^"]+)"',
webpage, 'description', fatal=False)
- final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
- thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
+ final_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4' % video_id
+ thumbnail_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg' % video_id
return {
'id': video_id,
# coding: utf-8
from __future__ import unicode_literals
-from .common import InfoExtractor
+import re
+from .common import InfoExtractor
from ..utils import (
float_or_none,
+ parse_iso8601,
+ unescapeHTML,
)
class RteIE(InfoExtractor):
+ IE_NAME = 'rte'
+ IE_DESC = 'Raidió Teilifís Éireann TV'
_VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
'info_dict': {
'id': '10478715',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Watch iWitness online',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.',
r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
- feeds_url = self._html_search_meta("feeds-prefix", webpage, 'feeds url') + video_id
+ feeds_url = self._html_search_meta('feeds-prefix', webpage, 'feeds url') + video_id
json_string = self._download_json(feeds_url, video_id)
# f4m_url = server + relative_url
f4m_url = json_string['shows'][0]['media:group'][0]['rte:server'] + json_string['shows'][0]['media:group'][0]['url']
f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
- f4m_formats = [{
- 'format_id': f['format_id'],
- 'url': f['url'],
- 'ext': 'mp4',
- 'width': f['width'],
- 'height': f['height'],
- } for f in f4m_formats]
return {
'id': video_id,
'thumbnail': thumbnail,
'duration': duration,
}
+
+
+class RteRadioIE(InfoExtractor):
+ IE_NAME = 'rte:radio'
+ IE_DESC = 'Raidió Teilifís Éireann radio'
+ # Radioplayer URLs have two distinct specifier formats:
+ # the old format #!rii=<channel_id>:<id>:<playable_item_id>:<date>:
+ # the new format #!rii=b<channel_id>_<id>_<playable_item_id>_<date>_
+ # where the IDs are int/empty, the date is DD-MM-YYYY, and the specifier may be truncated.
+ # An <id> uniquely defines an individual recording, and is the only part we require.
+ _VALID_URL = r'https?://(?:www\.)?rte\.ie/radio/utils/radioplayer/rteradioweb\.html#!rii=(?:b?[0-9]*)(?:%3A|:|%5F|_)(?P<id>[0-9]+)'
+
+ _TESTS = [{
+ # Old-style player URL; HLS and RTMPE formats
+ 'url': 'http://www.rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=16:10507902:2414:27-12-2015:',
+ 'info_dict': {
+ 'id': '10507902',
+ 'ext': 'mp4',
+ 'title': 'Gloria',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'md5:9ce124a7fb41559ec68f06387cabddf0',
+ 'timestamp': 1451203200,
+ 'upload_date': '20151227',
+ 'duration': 7230.0,
+ },
+ 'params': {
+ 'skip_download': 'f4m fails with --test atm'
+ }
+ }, {
+ # New-style player URL; RTMPE formats only
+ 'url': 'http://rte.ie/radio/utils/radioplayer/rteradioweb.html#!rii=b16_3250678_8861_06-04-2012_',
+ 'info_dict': {
+ 'id': '3250678',
+ 'ext': 'flv',
+ 'title': 'The Lyric Concert with Paul Herriott',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': '',
+ 'timestamp': 1333742400,
+ 'upload_date': '20120406',
+ 'duration': 7199.016,
+ },
+ 'params': {
+ 'skip_download': 'f4m fails with --test atm'
+ }
+ }]
+
+ def _real_extract(self, url):
+ item_id = self._match_id(url)
+
+ json_string = self._download_json(
+ 'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=' + item_id,
+ item_id)
+
+ # NB the string values in the JSON are stored using XML escaping(!)
+ show = json_string['shows'][0]
+ title = unescapeHTML(show['title'])
+ description = unescapeHTML(show.get('description'))
+ thumbnail = show.get('thumbnail')
+ duration = float_or_none(show.get('duration'), 1000)
+ timestamp = parse_iso8601(show.get('published'))
+
+ mg = show['media:group'][0]
+
+ formats = []
+
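+ # a plain 'url' value is an RTMP(E) locator; split it into base URL, app and playpath for the RTMP downloader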
+ if mg.get('url'):
+ m = re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
+ if m:
+ m = m.groupdict()
+ formats.append({
+ 'url': m['url'] + '/' + m['app'],
+ 'app': m['app'],
+ 'play_path': m['playpath'],
+ 'player_url': url,
+ 'ext': 'flv',
+ 'format_id': 'rtmp',
+ })
+
+ if mg.get('hls_server') and mg.get('hls_url'):
+ formats.extend(self._extract_m3u8_formats(
+ mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
+ entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
+
+ if mg.get('hds_server') and mg.get('hds_url'):
+ formats.extend(self._extract_f4m_formats(
+ mg['hds_server'] + mg['hds_url'], item_id,
+ f4m_id='hds', fatal=False))
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': item_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ }
download_url = video_info['streamurl']
download_url = download_url.replace('\\', '')
stream_url = 'mp4:' + self._html_search_regex(r'ondemand/(.*)', download_url, 'stream URL')
- rtmp_conn = ["S:connect", "O:1", "NS:pageUrl:" + url, "NB:fpad:0", "NN:videoFunction:1", "O:0"]
+ rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0']
formats = [{
'url': download_url,
import re
-from .common import InfoExtractor
+from .srgssr import SRGSSRIE
from ..compat import (
compat_str,
compat_urllib_parse_urlparse,
)
-class RTSIE(InfoExtractor):
+class RTSIE(SRGSSRIE):
IE_DESC = 'RTS.ch'
- _VALID_URL = r'''(?x)
- (?:
- rts:(?P<rts_id>\d+)|
- https?://
- (?:www\.)?rts\.ch/
- (?:
- (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|
- play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+)
- )
- )'''
+ _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'
_TESTS = [
{
'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
- 'md5': '753b877968ad8afaeddccc374d4256a5',
+ 'md5': 'f254c4b26fb1d3c183793d52bc40d3e7',
'info_dict': {
'id': '3449373',
'display_id': 'les-enfants-terribles',
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
},
{
'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
- 'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
+ 'md5': 'f1077ac5af686c76528dc8d7c5df29ba',
'info_dict': {
- 'id': '5624067',
- 'display_id': 'entre-ciel-et-mer',
+ 'id': '5742494',
+ 'display_id': '5742494',
'ext': 'mp4',
'duration': 3720,
'title': 'Les yeux dans les cieux - Mon homard au Canada',
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
},
{
'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
},
{
'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
- 'md5': '9bb06503773c07ce83d3cbd793cebb91',
+ 'md5': '9f713382f15322181bb366cc8c3a4ff0',
'info_dict': {
'id': '5745356',
'display_id': 'londres-cachee-par-un-epais-smog',
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
},
{
'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
'timestamp': 1396551600,
},
},
- {
- 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
- 'md5': '968777c8779e5aa2434be96c54e19743',
- 'info_dict': {
- 'id': '6348260',
- 'display_id': 'le-19h30',
- 'ext': 'mp4',
- 'duration': 1796,
- 'title': 'Le 19h30',
- 'description': '',
- 'uploader': 'Le 19h30',
- 'upload_date': '20141201',
- 'timestamp': 1417458600,
- 'thumbnail': 're:^https?://.*\.image',
- 'view_count': int,
- },
- },
{
# article with videos on rhs
'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
},
'playlist_mincount': 5,
- },
- {
- 'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
- 'only_matching': True,
}
]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- video_id = m.group('rts_id') or m.group('id') or m.group('id_new')
- display_id = m.group('display_id') or m.group('display_id_new')
+ media_id = m.group('rts_id') or m.group('id')
+ display_id = m.group('display_id') or media_id
def download_json(internal_id):
return self._download_json(
'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
display_id)
- all_info = download_json(video_id)
+ all_info = download_json(media_id)
- # video_id extracted out of URL is not always a real id
+ # the media_id extracted from the URL is not always a real media id
if 'video' not in all_info and 'audio' not in all_info:
page = self._download_webpage(url, display_id)
# article with videos on rhs
videos = re.findall(
- r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
+ r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
page)
+ if not videos:
+ videos = re.findall(
+ r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
+ page)
if videos:
- entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos]
- return self.playlist_result(entries, video_id, self._og_search_title(page))
+ entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos]
+ return self.playlist_result(entries, media_id, self._og_search_title(page))
internal_id = self._html_search_regex(
r'<(?:video|audio) data-id="([0-9]+)"', page,
'internal video id')
all_info = download_json(internal_id)
+ media_type = 'video' if 'video' in all_info else 'audio'
+
+ # query the playlist API, which raises on geo-blocks and other playback restrictions
+ self.get_media_data('rts', media_type, media_id)
+
info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']
upload_timestamp = parse_iso8601(info.get('broadcast_date'))
formats = []
for format_id, format_url in info['streams'].items():
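+ # skip the fixed SD renditions when the corresponding adaptive master ('hds'/'hls') is also present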
+ if format_id == 'hds_sd' and 'hds' in info['streams']:
+ continue
+ if format_id == 'hls_sd' and 'hls' in info['streams']:
+ continue
if format_url.endswith('.f4m'):
token = self._download_xml(
'http://tp.srgssr.ch/token/akahd.xml?stream=%s/*' % compat_urllib_parse_urlparse(format_url).path,
- video_id, 'Downloading %s token' % format_id)
+ media_id, 'Downloading %s token' % format_id)
auth_params = xpath_text(token, './/authparams', 'auth params')
if not auth_params:
continue
formats.extend(self._extract_f4m_formats(
'%s?%s&hdcore=3.4.0&plugin=aasp-3.4.0.132.66' % (format_url, auth_params),
- video_id, f4m_id=format_id))
+ media_id, f4m_id=format_id, fatal=False))
elif format_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(
- format_url, video_id, 'mp4', m3u8_id=format_id))
+ format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
else:
formats.append({
'format_id': format_id,
'tbr': media['rate'] or extract_bitrate(media['url']),
} for media in info['media'] if media.get('rate')])
- self._check_formats(formats, video_id)
+ self._check_formats(formats, media_id)
self._sort_formats(formats)
return {
- 'id': video_id,
+ 'id': media_id,
'display_id': display_id,
'formats': formats,
'title': info['title'],
--- /dev/null
+from __future__ import unicode_literals
+
+from .nuevo import NuevoBaseIE
+
+
+class RulePornIE(NuevoBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?ruleporn\.com/(?:[^/?#&]+/)*(?P<id>[^/?#&]+)'
+ _TEST = {
+ 'url': 'http://ruleporn.com/brunette-nympho-chick-takes-her-boyfriend-in-every-angle/',
+ 'md5': '86861ebc624a1097c7c10eaf06d7d505',
+ 'info_dict': {
+ 'id': '48212',
+ 'display_id': 'brunette-nympho-chick-takes-her-boyfriend-in-every-angle',
+ 'ext': 'mp4',
+ 'title': 'Brunette Nympho Chick Takes Her Boyfriend In Every Angle',
+ 'description': 'md5:6d28be231b981fff1981deaaa03a04d5',
+ 'age_limit': 18,
+ 'duration': 635.1,
+ }
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
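+ # the page embeds a lovehomeporn.com player; its numeric id keys the Nuevo config lookup below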
+ video_id = self._search_regex(
+ r'lovehomeporn\.com/embed/(\d+)', webpage, 'video id')
+
+ title = self._search_regex(
+ r'<h2[^>]+title=(["\'])(?P<url>.+?)\1',
+ webpage, 'title', group='url')
+ description = self._html_search_meta('description', webpage)
+
+ info = self._extract_nuevo(
+ 'http://lovehomeporn.com/media/nuevo/econfig.php?key=%s&rp=true' % video_id,
+ video_id)
+ info.update({
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'age_limit': 18
+ })
+ return info
class RutubeIE(InfoExtractor):
IE_NAME = 'rutube'
IE_DESC = 'Rutube videos'
- _VALID_URL = r'https?://rutube\.ru/video/(?P<id>[\da-z]{32})'
+ _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P<id>[\da-z]{32})'
- _TEST = {
+ _TESTS = [{
'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
'info_dict': {
'id': '3eac3b4561676c17df9132a9a1e62e3e',
# It requires ffmpeg (m3u8 download)
'skip_download': True,
},
- }
+ }, {
+ 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
for format_id, format_url in options['video_balancer'].items():
ext = determine_ext(format_url)
if ext == 'm3u8':
- m3u8_formats = self._extract_m3u8_formats(
- format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
elif ext == 'f4m':
- f4m_formats = self._extract_f4m_formats(
- format_url, video_id, f4m_id=format_id, fatal=False)
- if f4m_formats:
- formats.extend(f4m_formats)
+ formats.extend(self._extract_f4m_formats(
+ format_url, video_id, f4m_id=format_id, fatal=False))
else:
formats.append({
'url': format_url,
is_live = video_type == 'live'
json_data = self._download_json(
- 'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if is_live else '', video_id),
+ 'http://player.rutv.ru/iframe/data%s/id/%s' % ('live' if is_live else 'video', video_id),
video_id, 'Downloading JSON')
if json_data['errors']:
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
- m3u8_formats = self._extract_m3u8_formats(
- video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
- f4m_formats = self._extract_f4m_formats(
- video_url, video_id, f4m_id='hds', fatal=False)
- if f4m_formats:
- formats.extend(f4m_formats)
+ formats.extend(self._extract_f4m_formats(
+ video_url, video_id, f4m_id='hds', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
+ format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
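+ # probe the source and skip URLs that do not respond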
+ if not self._is_valid_url(video_url, video_id, format_id):
+ continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
- 'format_id': '%s-%s' % (proto, label if label else tbr),
+ 'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
-from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
sanitized_Request,
smuggle_url,
std_headers,
+ urlencode_postdata,
)
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers)
+ self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
login_page = self._download_webpage(
request, None, 'Logging in as %s' % username)
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+)
+
+
+class ScreenJunkiesIE(InfoExtractor):
+ _VALID_URL = r'http://www\.screenjunkies\.com/video/(?P<display_id>[^/]+?)(?:-(?P<id>\d+))?(?:[/?#&]|$)'
+ _TESTS = [{
+ 'url': 'http://www.screenjunkies.com/video/best-quentin-tarantino-movie-2841915',
+ 'md5': '5c2b686bec3d43de42bde9ec047536b0',
+ 'info_dict': {
+ 'id': '2841915',
+ 'display_id': 'best-quentin-tarantino-movie',
+ 'ext': 'mp4',
+ 'title': 'Best Quentin Tarantino Movie',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 3671,
+ 'age_limit': 13,
+ 'tags': list,
+ },
+ }, {
+ 'url': 'http://www.screenjunkies.com/video/honest-trailers-the-dark-knight',
+ 'info_dict': {
+ 'id': '2348808',
+ 'display_id': 'honest-trailers-the-dark-knight',
+ 'ext': 'mp4',
+ 'title': "Honest Trailers: 'The Dark Knight'",
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'age_limit': 10,
+ 'tags': list,
+ },
+ }, {
+ # requires subscription but worked around
+ 'url': 'http://www.screenjunkies.com/video/knocking-dead-ep-1-the-show-so-far-3003285',
+ 'info_dict': {
+ 'id': '3003285',
+ 'display_id': 'knocking-dead-ep-1-the-show-so-far',
+ 'ext': 'mp4',
+ 'title': 'Knocking Dead Ep 1: State of The Dead Recap',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 3307,
+ 'age_limit': 13,
+ 'tags': list,
+ },
+ }]
+
+ _DEFAULT_BITRATES = (48, 150, 496, 864, 2240)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ if not video_id:
+ webpage = self._download_webpage(url, display_id)
+ video_id = self._search_regex(
+ (r'src=["\']/embed/(\d+)', r'data-video-content-id=["\'](\d+)'),
+ webpage, 'video id')
+
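+ # the embed page exposes the media list and auth token via its embedVars JS object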
+ webpage = self._download_webpage(
+ 'http://www.screenjunkies.com/embed/%s' % video_id,
+ display_id, 'Downloading video embed page')
+ embed_vars = self._parse_json(
+ self._search_regex(
+ r'(?s)embedVars\s*=\s*({.+?})\s*</script>', webpage, 'embed vars'),
+ display_id)
+
+ title = embed_vars['contentName']
+
+ formats = []
+ bitrates = []
+ for f in embed_vars.get('media', []):
+ if not f.get('uri') or f.get('mediaPurpose') != 'play':
+ continue
+ bitrate = int_or_none(f.get('bitRate'))
+ if bitrate:
+ bitrates.append(bitrate)
+ formats.append({
+ 'url': f['uri'],
+ 'format_id': 'http-%d' % bitrate if bitrate else 'http',
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ 'tbr': bitrate,
+ 'format': 'mp4',
+ })
+
+ if not bitrates:
+ # When subscriptionLevel > 0, i.e. a plus subscription is required,
+ # the media list is empty. However, the hds and hls URIs are still
+ # available, so we can build them assuming the default bitrates.
+ bitrates = self._DEFAULT_BITRATES
+
+ auth_token = embed_vars.get('AuthToken')
+
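+ # build a comma-separated multi-bitrate manifest URL, e.g.
+ # <hlsUri>,48,150,496,864,2240,_kbps.mp4.m3u8?<AuthToken>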
+ def construct_manifest_url(base_url, ext):
+ pieces = [base_url]
+ pieces.extend([compat_str(b) for b in bitrates])
+ pieces.append('_kbps.mp4.%s?%s' % (ext, auth_token))
+ return ','.join(pieces)
+
+ if bitrates and auth_token:
+ hds_url = embed_vars.get('hdsUri')
+ if hds_url:
+ f4m_formats = self._extract_f4m_formats(
+ construct_manifest_url(hds_url, 'f4m'),
+ display_id, f4m_id='hds', fatal=False)
+ if len(f4m_formats) == len(bitrates):
+ for f, bitrate in zip(f4m_formats, bitrates):
+ if not f.get('tbr'):
+ f['format_id'] = 'hds-%d' % bitrate
+ f['tbr'] = bitrate
+ # TODO: fix f4m downloader to handle manifests without bitrates if possible
+ # formats.extend(f4m_formats)
+
+ hls_url = embed_vars.get('hlsUri')
+ if hls_url:
+ formats.extend(self._extract_m3u8_formats(
+ construct_manifest_url(hls_url, 'm3u8'),
+ display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'thumbnail': embed_vars.get('thumbUri'),
+ 'duration': int_or_none(embed_vars.get('videoLengthInSeconds')) or None,
+ 'age_limit': parse_age_limit(embed_vars.get('audienceRating')),
+ 'tags': embed_vars.get('tags', '').split(','),
+ 'formats': formats,
+ }
re.sub(
r'(?s)/\*.*?\*/', '',
self._search_regex(
- r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
+ r'sources\s*:\s*(\[[^\]]+?\])', playerconfig,
'sources',
).replace(
"' + thisObj.options.videoserver + '",
formats = []
for source in sources:
if source['type'] == 'hls':
- formats.extend(self._extract_m3u8_formats(source['file'], video_id))
+ formats.extend(self._extract_m3u8_formats(source['file'], video_id, ext='mp4'))
else:
file_ = source.get('file')
if not file_:
'upload_date': '20130401',
'description': 'Check out this and more on our website: http://teamfourstar.com\nTFS Store: http://sharkrobot.com/team-four-star\nFollow on Twitter: http://twitter.com/teamfourstar\nLike on FB: http://facebook.com/teamfourstar',
'title': 'A Moment With TFS Episode 4',
- }
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}
def _real_extract(self, url):
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
- ["ag", "76440", "http://ag-f.akamaihd.net"],
- ["aging", "76442", "http://aging-f.akamaihd.net"],
- ["approps", "76441", "http://approps-f.akamaihd.net"],
- ["armed", "76445", "http://armed-f.akamaihd.net"],
- ["banking", "76446", "http://banking-f.akamaihd.net"],
- ["budget", "76447", "http://budget-f.akamaihd.net"],
- ["cecc", "76486", "http://srs-f.akamaihd.net"],
- ["commerce", "80177", "http://commerce1-f.akamaihd.net"],
- ["csce", "75229", "http://srs-f.akamaihd.net"],
- ["dpc", "76590", "http://dpc-f.akamaihd.net"],
- ["energy", "76448", "http://energy-f.akamaihd.net"],
- ["epw", "76478", "http://epw-f.akamaihd.net"],
- ["ethics", "76449", "http://ethics-f.akamaihd.net"],
- ["finance", "76450", "http://finance-f.akamaihd.net"],
- ["foreign", "76451", "http://foreign-f.akamaihd.net"],
- ["govtaff", "76453", "http://govtaff-f.akamaihd.net"],
- ["help", "76452", "http://help-f.akamaihd.net"],
- ["indian", "76455", "http://indian-f.akamaihd.net"],
- ["intel", "76456", "http://intel-f.akamaihd.net"],
- ["intlnarc", "76457", "http://intlnarc-f.akamaihd.net"],
- ["jccic", "85180", "http://jccic-f.akamaihd.net"],
- ["jec", "76458", "http://jec-f.akamaihd.net"],
- ["judiciary", "76459", "http://judiciary-f.akamaihd.net"],
- ["rpc", "76591", "http://rpc-f.akamaihd.net"],
- ["rules", "76460", "http://rules-f.akamaihd.net"],
- ["saa", "76489", "http://srs-f.akamaihd.net"],
- ["smbiz", "76461", "http://smbiz-f.akamaihd.net"],
- ["srs", "75229", "http://srs-f.akamaihd.net"],
- ["uscc", "76487", "http://srs-f.akamaihd.net"],
- ["vetaff", "76462", "http://vetaff-f.akamaihd.net"],
- ["arch", "", "http://ussenate-f.akamaihd.net/"]
+ ['ag', '76440', 'http://ag-f.akamaihd.net'],
+ ['aging', '76442', 'http://aging-f.akamaihd.net'],
+ ['approps', '76441', 'http://approps-f.akamaihd.net'],
+ ['armed', '76445', 'http://armed-f.akamaihd.net'],
+ ['banking', '76446', 'http://banking-f.akamaihd.net'],
+ ['budget', '76447', 'http://budget-f.akamaihd.net'],
+ ['cecc', '76486', 'http://srs-f.akamaihd.net'],
+ ['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
+ ['csce', '75229', 'http://srs-f.akamaihd.net'],
+ ['dpc', '76590', 'http://dpc-f.akamaihd.net'],
+ ['energy', '76448', 'http://energy-f.akamaihd.net'],
+ ['epw', '76478', 'http://epw-f.akamaihd.net'],
+ ['ethics', '76449', 'http://ethics-f.akamaihd.net'],
+ ['finance', '76450', 'http://finance-f.akamaihd.net'],
+ ['foreign', '76451', 'http://foreign-f.akamaihd.net'],
+ ['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
+ ['help', '76452', 'http://help-f.akamaihd.net'],
+ ['indian', '76455', 'http://indian-f.akamaihd.net'],
+ ['intel', '76456', 'http://intel-f.akamaihd.net'],
+ ['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
+ ['jccic', '85180', 'http://jccic-f.akamaihd.net'],
+ ['jec', '76458', 'http://jec-f.akamaihd.net'],
+ ['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
+ ['rpc', '76591', 'http://rpc-f.akamaihd.net'],
+ ['rules', '76460', 'http://rules-f.akamaihd.net'],
+ ['saa', '76489', 'http://srs-f.akamaihd.net'],
+ ['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
+ ['srs', '75229', 'http://srs-f.akamaihd.net'],
+ ['uscc', '76487', 'http://srs-f.akamaihd.net'],
+ ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
+ ['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'http://www\.senate\.gov/isvp/?\?(?P<qs>.+)'
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
'info_dict': {
'id': 'judiciary031715',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Integrated Senate Video Player',
'thumbnail': 're:^https?://.*\.(?:jpg|png)$',
- }
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.senate.gov/isvp/?type=live&comm=commerce&filename=commerce011514.mp4&auto_play=false',
'info_dict': {
'id': 'commerce011514',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Integrated Senate Video Player'
- }
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.senate.gov/isvp/?type=arch&comm=intel&filename=intel090613&hc_location=ufi',
# checksum differs each time
'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-%s.html'
% (video_id, api_vars['type']), video_id, 'Downloading player JSON')
+ if player.get('drm'):
+ raise ExtractorError('This video is DRM protected.', expected=True)
+
formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
video = self._download_json(
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ parse_iso8601,
+ parse_duration,
+)
+
+
+class SkyNewsArabiaBaseIE(InfoExtractor):
+ _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images'
+
+ def _call_api(self, path, value):
+ return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value)
+
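+ # videoUrl points at a Limelight media page; the 32-character hex id in its path is all the Limelight extractor needs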
+ def _get_limelight_media_id(self, url):
+ return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id')
+
+ def _get_image_url(self, image_path_template, width='1600', height='1200'):
+ return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height)
+
+ def _extract_video_info(self, video_data):
+ video_id = compat_str(video_data['id'])
+ topic = video_data.get('topicTitle')
+ return {
+ '_type': 'url_transparent',
+ 'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']),
+ 'id': video_id,
+ 'title': video_data['headline'],
+ 'description': video_data.get('summary'),
+ 'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']),
+ 'timestamp': parse_iso8601(video_data.get('date')),
+ 'duration': parse_duration(video_data.get('runTime')),
+ 'tags': video_data.get('tags', []),
+ 'categories': [topic] if topic else [],
+ 'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id,
+ 'ie_key': 'LimelightMedia',
+ }
+
+
+class SkyNewsArabiaIE(SkyNewsArabiaBaseIE):
+ IE_NAME = 'skynewsarabia:video'
+ _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3',
+ 'info_dict': {
+ 'id': '794902',
+ 'ext': 'flv',
+ 'title': 'نصف مليون مصباح على شجرة كريسماس',
+ 'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6',
+ 'upload_date': '20151128',
+ 'timestamp': 1448697198,
+ 'duration': 2119,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ video_data = self._call_api('video', video_id)
+ return self._extract_video_info(video_data)
+
+
+class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE):
+ IE_NAME = 'skynewsarabia:article'
+ _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9',
+ 'info_dict': {
+ 'id': '794549',
+ 'ext': 'flv',
+ 'title': 'بالفيديو.. ألعاب ذكية تحاكي واقع المنطقة',
+ 'description': 'md5:0c373d29919a851e080ee4edd0c5d97f',
+ 'upload_date': '20151126',
+ 'timestamp': 1448559336,
+ 'duration': 281.6,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD',
+ 'info_dict': {
+ 'id': '794844',
+ 'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن',
+ 'description': 'md5:5c927b8b2e805796e7f693538d96fc7e',
+ },
+ 'playlist_mincount': 2,
+ }]
+
+ def _real_extract(self, url):
+ article_id = self._match_id(url)
+ article_data = self._call_api('article', article_id)
+ media_asset = article_data['mediaAsset']
+ if media_asset['type'] == 'VIDEO':
+ topic = article_data.get('topicTitle')
+ return {
+ '_type': 'url_transparent',
+ 'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']),
+ 'id': article_id,
+ 'title': article_data['headline'],
+ 'description': article_data.get('summary'),
+ 'thumbnail': self._get_image_url(media_asset['imageUrl']),
+ 'timestamp': parse_iso8601(article_data.get('date')),
+ 'tags': article_data.get('tags', []),
+ 'categories': [topic] if topic else [],
+ 'webpage_url': url,
+ 'ie_key': 'LimelightMedia',
+ }
+ entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO']
+ return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary'))
'info_dict': {
'id': 'TD73btpBqSxc',
'ext': 'mp4',
- "title": "virginie baisee en cam",
- "age_limit": 18,
+ 'title': 'virginie baisee en cam',
+ 'age_limit': 18,
'thumbnail': 're:https?://.*?\.jpg'
}
}
'getvideoinfo': '1',
}
- video_password = self._downloader.params.get('videopassword', None)
+ video_password = self._downloader.params.get('videopassword')
if video_password:
video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket
- broadcast_password = self._downloader.params.get('videopassword', None)
+ broadcast_password = self._downloader.params.get('videopassword')
if broadcast_password:
url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
- video_url = "http://cdn.videos.snotr.com/%s.flv" % video_id
+ video_url = 'http://cdn.videos.snotr.com/%s.flv' % video_id
view_count = str_to_int(self._html_search_regex(
r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>',
'file': clips_url[i],
'new': su[i],
'prod': 'flash',
+ 'rb': 1,
}
if cdnId is not None:
+++ /dev/null
-# encoding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .crunchyroll import CrunchyrollIE
-
-from .common import InfoExtractor
-from ..compat import compat_HTTPError
-from ..utils import (
- ExtractorError,
- int_or_none,
- remove_start,
- xpath_text,
-)
-
-
-class SoompiBaseIE(InfoExtractor):
- def _get_episodes(self, webpage, episode_filter=None):
- episodes = self._parse_json(
- self._search_regex(
- r'VIDEOS\s*=\s*(\[.+?\]);', webpage, 'episodes JSON'),
- None)
- return list(filter(episode_filter, episodes))
-
-
-class SoompiIE(SoompiBaseIE, CrunchyrollIE):
- IE_NAME = 'soompi'
- _VALID_URL = r'https?://tv\.soompi\.com/(?:en/)?watch/(?P<id>[0-9]+)'
- _TESTS = [{
- 'url': 'http://tv.soompi.com/en/watch/29235',
- 'info_dict': {
- 'id': '29235',
- 'ext': 'mp4',
- 'title': 'Episode 1096',
- 'description': '2015-05-20'
- },
- 'params': {
- 'skip_download': True,
- },
- }]
-
- def _get_episode(self, webpage, video_id):
- return self._get_episodes(webpage, lambda x: x['id'] == video_id)[0]
-
- def _get_subtitles(self, config, video_id):
- sub_langs = {}
- for subtitle in config.findall('./{default}preload/subtitles/subtitle'):
- sub_langs[subtitle.attrib['id']] = subtitle.attrib['title']
-
- subtitles = {}
- for s in config.findall('./{default}preload/subtitle'):
- lang_code = sub_langs.get(s.attrib['id'])
- if not lang_code:
- continue
- sub_id = s.get('id')
- data = xpath_text(s, './data', 'data')
- iv = xpath_text(s, './iv', 'iv')
- if not id or not iv or not data:
- continue
- subtitle = self._decrypt_subtitles(data, iv, sub_id).decode('utf-8')
- subtitles[lang_code] = self._extract_subtitles(subtitle)
- return subtitles
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
-
- try:
- webpage = self._download_webpage(
- url, video_id, 'Downloading episode page')
- except ExtractorError as ee:
- if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
- webpage = ee.cause.read()
- block_message = self._html_search_regex(
- r'(?s)<div class="block-message">(.+?)</div>', webpage,
- 'block message', default=None)
- if block_message:
- raise ExtractorError(block_message, expected=True)
- raise
-
- formats = []
- config = None
- for format_id in re.findall(r'\?quality=([0-9a-zA-Z]+)', webpage):
- config = self._download_xml(
- 'http://tv.soompi.com/en/show/_/%s-config.xml?mode=hls&quality=%s' % (video_id, format_id),
- video_id, 'Downloading %s XML' % format_id)
- m3u8_url = xpath_text(
- config, './{default}preload/stream_info/file',
- '%s m3u8 URL' % format_id)
- if not m3u8_url:
- continue
- formats.extend(self._extract_m3u8_formats(
- m3u8_url, video_id, 'mp4', m3u8_id=format_id))
- self._sort_formats(formats)
-
- episode = self._get_episode(webpage, video_id)
-
- title = episode['name']
- description = episode.get('description')
- duration = int_or_none(episode.get('duration'))
-
- thumbnails = [{
- 'id': thumbnail_id,
- 'url': thumbnail_url,
- } for thumbnail_id, thumbnail_url in episode.get('img_url', {}).items()]
-
- subtitles = self.extract_subtitles(config, video_id)
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnails': thumbnails,
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles
- }
-
-
-class SoompiShowIE(SoompiBaseIE):
- IE_NAME = 'soompi:show'
- _VALID_URL = r'https?://tv\.soompi\.com/en/shows/(?P<id>[0-9a-zA-Z\-_]+)'
- _TESTS = [{
- 'url': 'http://tv.soompi.com/en/shows/liar-game',
- 'info_dict': {
- 'id': 'liar-game',
- 'title': 'Liar Game',
- 'description': 'md5:52c02bce0c1a622a95823591d0589b66',
- },
- 'playlist_count': 14,
- }]
-
- def _real_extract(self, url):
- show_id = self._match_id(url)
-
- webpage = self._download_webpage(
- url, show_id, 'Downloading show page')
-
- title = remove_start(self._og_search_title(webpage), 'SoompiTV | ')
- description = self._og_search_description(webpage)
-
- entries = [
- self.url_result('http://tv.soompi.com/en/watch/%s' % episode['id'], 'Soompi')
- for episode in self._get_episodes(webpage)]
-
- return self.playlist_result(entries, show_id, title, description)
full_title = track_id
token = mobj.group('secret_token')
if token:
- info_json_url += "&secret_token=" + token
+ info_json_url += '&secret_token=' + token
elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
real_url = query['url'][0]
resource = mobj.group('rsrc') or 'all'
base_url = self._BASE_URL_MAP[resource] % user['id']
- next_href = None
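+ # paginate with linked_partitioning: request 50 items per page and follow the API's next_href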
+ COMMON_QUERY = {
+ 'limit': 50,
+ 'client_id': self._CLIENT_ID,
+ 'linked_partitioning': '1',
+ }
+
+ query = COMMON_QUERY.copy()
+ query['offset'] = 0
+
+ next_href = base_url + '?' + compat_urllib_parse.urlencode(query)
entries = []
for i in itertools.count():
- if not next_href:
- data = compat_urllib_parse.urlencode({
- 'offset': i * 50,
- 'limit': 50,
- 'client_id': self._CLIENT_ID,
- 'linked_partitioning': '1',
- 'representation': 'speedy',
- })
- next_href = base_url + '?' + data
-
response = self._download_json(
next_href, uploader, 'Downloading track page %s' % (i + 1))
collection = response['collection']
-
if not collection:
- self.to_screen('%s: End page received' % uploader)
break
def resolve_permalink_url(candidates):
if permalink_url:
entries.append(self.url_result(permalink_url))
- if 'next_href' in response:
- next_href = response['next_href']
- if not next_href:
- break
- else:
- next_href = None
+ next_href = response.get('next_href')
+ if not next_href:
+ break
+
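+ # next_href may omit some of the common parameters (e.g. client_id), so merge them back in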
+ parsed_next_href = compat_urlparse.urlparse(next_href)
+ qs = compat_urlparse.parse_qs(parsed_next_href.query)
+ qs.update(COMMON_QUERY)
+ next_href = compat_urlparse.urlunparse(
+ parsed_next_href._replace(query=compat_urllib_parse.urlencode(qs, True)))
return {
'_type': 'playlist',
class SpankBangIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video'
- _TEST = {
+ _TESTS = [{
'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
'md5': '1cc433e1d6aa14bc376535b8679302f7',
'info_dict': {
'uploader': 'silly2587',
'age_limit': 18,
}
- }
+ }, {
+ # 480p only
+ 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
'ext': 'mp4',
'format_id': '%sp' % height,
'height': int(height),
- } for height in re.findall(r'<span[^>]+q_(\d+)p', webpage)]
+ } for height in re.findall(r'<(?:span|li|p)[^>]+[qb]_(\d+)p', webpage)]
+ self._check_formats(formats, video_id)
self._sort_formats(formats)
title = self._html_search_regex(
- r'(?s)<h1>(.+?)</h1>', webpage, 'title')
+ r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title')
description = self._search_regex(
r'class="desc"[^>]*>([^<]+)',
webpage, 'description', default=None)
description = self._html_search_meta('description', webpage, 'description')
base_url = self._search_regex(
- r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL')
+ [r'server\s*:\s*(["\'])(?P<url>.+?)\1', r'var\s+server\s*=\s*"(?P<url>[^"]+)\"'],
+ webpage, 'server URL', group='url')
xml_url = base_url + video_id + '.xml'
idoc = self._download_xml(xml_url, video_id)
smil_doc = self._download_xml(
smil_url, video_id, note='Downloading SMIL metadata')
- base_url = smil_doc.find('./head/meta').attrib['base']
+ base_url_el = smil_doc.find('./head/meta')
+ if base_url_el is not None:
+ base_url = base_url_el.attrib['base']
formats.extend([{
'format_id': 'rmtp',
- 'url': base_url,
+ 'url': base_url if base_url_el is not None else n.attrib['src'],
'play_path': n.attrib['src'],
'ext': 'flv',
'preference': -100,
+++ /dev/null
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-from .common import InfoExtractor
-from ..utils import (
- determine_ext,
- parse_iso8601,
- xpath_text,
-)
-
-
-class SrfIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
- _TESTS = [{
- 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
- 'md5': '4cd93523723beff51bb4bee974ee238d',
- 'info_dict': {
- 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
- 'display_id': 'snowden-beantragt-asyl-in-russland',
- 'ext': 'm4v',
- 'upload_date': '20130701',
- 'title': 'Snowden beantragt Asyl in Russland',
- 'timestamp': 1372713995,
- }
- }, {
- # No Speichern (Save) button
- 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
- 'md5': 'd97e236e80d1d24729e5d0953d276a4f',
- 'info_dict': {
- 'id': '677f5829-e473-4823-ac83-a1087fe97faa',
- 'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive',
- 'ext': 'flv',
- 'upload_date': '20130710',
- 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
- 'timestamp': 1373493600,
- },
- }, {
- 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
- 'only_matching': True,
- }, {
- 'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
- 'only_matching': True,
- }]
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
- display_id = re.match(self._VALID_URL, url).group('display_id') or video_id
-
- video_data = self._download_xml(
- 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id,
- display_id)
-
- title = xpath_text(
- video_data, './AssetMetadatas/AssetMetadata/title', fatal=True)
- thumbnails = [{
- 'url': s.text
- } for s in video_data.findall('.//ImageRepresentation/url')]
- timestamp = parse_iso8601(xpath_text(video_data, './createdDate'))
- # The <duration> field in XML is different from the exact duration, skipping
-
- formats = []
- for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'):
- for url_node in item.findall('url'):
- quality = url_node.attrib['quality']
- full_url = url_node.text
- original_ext = determine_ext(full_url)
- format_id = '%s-%s' % (quality, item.attrib['protocol'])
- if original_ext == 'f4m':
- formats.extend(self._extract_f4m_formats(
- full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id))
- elif original_ext == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- full_url, display_id, 'mp4', m3u8_id=format_id))
- else:
- formats.append({
- 'url': full_url,
- 'ext': original_ext,
- 'format_id': format_id,
- 'quality': 0 if 'HD' in quality else -1,
- 'preference': 1,
- })
-
- self._sort_formats(formats)
-
- subtitles = {}
- subtitles_data = video_data.find('Subtitles')
- if subtitles_data is not None:
- subtitles_list = [{
- 'url': sub.text,
- 'ext': determine_ext(sub.text),
- } for sub in subtitles_data]
- if subtitles_list:
- subtitles['de'] = subtitles_list
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'formats': formats,
- 'title': title,
- 'thumbnails': thumbnails,
- 'timestamp': timestamp,
- 'subtitles': subtitles,
- }
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ parse_iso8601,
+ qualities,
+)
+
+
+class SRGSSRIE(InfoExtractor):
+ _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
+
+ _ERRORS = {
+ 'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
+ 'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
+ # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
+ 'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
+ 'LEGAL': 'The video cannot be transmitted for legal reasons.',
+ 'STARTDATE': 'This video is not yet available. Please try again later.',
+ }
+
+ def get_media_data(self, bu, media_type, media_id):
+ media_data = self._download_json(
+ 'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
+ media_id)[media_type.capitalize()]
+
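+ # a 'block' value signals a playback restriction; surface it as a readable error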
+ if media_data.get('block') and media_data['block'] in self._ERRORS:
+ raise ExtractorError('%s said: %s' % (
+ self.IE_NAME, self._ERRORS[media_data['block']]), expected=True)
+
+ return media_data
+
+ def _real_extract(self, url):
+ bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+
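+ # RTS media is served through its own API; delegate to the dedicated RTS extractor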
+ if bu == 'rts':
+ return self.url_result('rts:%s' % media_id, 'RTS')
+
+ media_data = self.get_media_data(bu, media_type, media_id)
+
+ metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
+ title = metadata['title']
+ description = metadata.get('description')
+ created_date = media_data.get('createdDate') or metadata.get('createdDate')
+ timestamp = parse_iso8601(created_date)
+
+ thumbnails = [{
+ 'id': image.get('id'),
+ 'url': image['url'],
+ } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]
+
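+ # rank streams by the broadcaster's quality labels, from LQ up to HD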
+ preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
+ formats = []
+ for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
+ protocol = source.get('@protocol')
+ for asset in source['url']:
+ asset_url = asset['text']
+ quality = asset['@quality']
+ format_id = '%s-%s' % (protocol, quality)
+ if protocol == 'HTTP-HDS':
+ formats.extend(self._extract_f4m_formats(
+ asset_url + '?hdcore=3.4.0', media_id,
+ f4m_id=format_id, fatal=False))
+ elif protocol == 'HTTP-HLS':
+ formats.extend(self._extract_m3u8_formats(
+ asset_url, media_id, 'mp4', 'm3u8_native',
+ m3u8_id=format_id, fatal=False))
+ else:
+ formats.append({
+ 'format_id': format_id,
+ 'url': asset_url,
+ 'preference': preference(quality),
+ 'ext': 'flv' if protocol == 'RTMP' else None,
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': media_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
+
+
+class SRGSSRPlayIE(InfoExtractor):
+ IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
+ _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'
+
+ _TESTS = [{
+ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
+ 'md5': '4cd93523723beff51bb4bee974ee238d',
+ 'info_dict': {
+ 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
+ 'ext': 'm4v',
+ 'upload_date': '20130701',
+ 'title': 'Snowden beantragt Asyl in Russland',
+ 'timestamp': 1372713995,
+ }
+ }, {
+ # No Speichern (Save) button
+ 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
+ 'md5': '0a274ce38fda48c53c01890651985bc6',
+ 'info_dict': {
+ 'id': '677f5829-e473-4823-ac83-a1087fe97faa',
+ 'ext': 'flv',
+ 'upload_date': '20130710',
+ 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
+ 'description': 'md5:88604432b60d5a38787f152dec89cd56',
+ 'timestamp': 1373493600,
+ },
+ }, {
+ 'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
+ 'info_dict': {
+ 'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
+ 'ext': 'mp3',
+ 'upload_date': '20151013',
+ 'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
+ 'timestamp': 1444750398,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
+ 'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
+ 'info_dict': {
+ 'id': '6348260',
+ 'display_id': '6348260',
+ 'ext': 'mp4',
+ 'duration': 1796,
+ 'title': 'Le 19h30',
+ 'description': '',
+ 'uploader': '19h30',
+ 'upload_date': '20141201',
+ 'timestamp': 1417458600,
+ 'thumbnail': 're:^https?://.*\.image',
+ 'view_count': int,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }]
+
+ def _real_extract(self, url):
+ bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+ # other info can be extracted from url + '&layout=json'
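+ # bu[:3] maps 'swissinfo' to the 'swi' business unit code used in srgssr: URLs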
+ return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
# encoding: utf-8
from __future__ import unicode_literals
-import json
+from .ard import ARDMediathekIE
+from ..utils import (
+ ExtractorError,
+ get_element_by_attribute,
+)
-from .common import InfoExtractor
-from ..utils import js_to_json
-
-class SRMediathekIE(InfoExtractor):
+class SRMediathekIE(ARDMediathekIE):
IE_DESC = 'Saarländischer Rundfunk'
_VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
'info_dict': {
'id': '28455',
'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
'thumbnail': 're:^https?://.*\.jpg$',
},
- }
+ 'skip': 'no longer available',
+ }, {
+ 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682',
+ 'info_dict': {
+ 'id': '37682',
+ 'ext': 'mp4',
+ 'title': 'Love, Cakes and Rock\'n\'Roll',
+ 'description': 'md5:18bf9763631c7d326c22603681e1123d',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ 'expected_warnings': ['Unable to download f4m manifest']
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- murls = json.loads(js_to_json(self._search_regex(
- r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs')))
- formats = [{'url': murl} for murl in murls]
- self._sort_formats(formats)
-
- title = json.loads(js_to_json(self._search_regex(
- r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0]
+ if '>Der gewünschte Beitrag ist leider nicht mehr verfügbar.<' in webpage:
+ raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
- return {
+ media_collection_url = self._search_regex(
+ r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url')
+ info = self._extract_media_info(media_collection_url, webpage, video_id)
+ info.update({
'id': video_id,
- 'title': title,
- 'formats': formats,
+ 'title': get_element_by_attribute('class', 'ardplayer-title', webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
- }
+ })
+ return info
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
_TESTS = [{
- "url": "http://store.steampowered.com/video/105600/",
- "playlist": [
+ 'url': 'http://store.steampowered.com/video/105600/',
+ 'playlist': [
{
- "md5": "f870007cee7065d7c76b88f0a45ecc07",
- "info_dict": {
+ 'md5': 'f870007cee7065d7c76b88f0a45ecc07',
+ 'info_dict': {
'id': '81300',
'ext': 'flv',
- "title": "Terraria 1.1 Trailer",
+ 'title': 'Terraria 1.1 Trailer',
'playlist_index': 1,
}
},
{
- "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
- "info_dict": {
+ 'md5': '61aaf31a5c5c3041afb58fb83cbb5751',
+ 'info_dict': {
'id': '80859',
'ext': 'flv',
- "title": "Terraria Trailer",
+ 'title': 'Terraria Trailer',
'playlist_index': 2,
}
}
})
self._sort_formats(formats)
+ subtitles = {}
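+ # subtitleReferences carries subtitle URLs; SVT content is Swedish, so file them under 'sv'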
+ subtitle_references = video_info.get('subtitleReferences')
+ if isinstance(subtitle_references, list):
+ for sr in subtitle_references:
+ subtitle_url = sr.get('url')
+ if subtitle_url:
+ subtitles.setdefault('sv', []).append({'url': subtitle_url})
+
duration = video_info.get('materialLength')
age_limit = 18 if video_info.get('inappropriateForChildren') else 0
'id': video_id,
'title': title,
'formats': formats,
+ 'subtitles': subtitles,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': age_limit,
class SVTPlayIE(SVTBaseIE):
IE_DESC = 'SVT Play and Öppet arkiv'
_VALID_URL = r'https?://(?:www\.)?(?P<host>svtplay|oppetarkiv)\.se/video/(?P<id>[0-9]+)'
- _TESTS = [{
- 'url': 'http://www.svtplay.se/video/2609989/sm-veckan/sm-veckan-rally-final-sasong-1-sm-veckan-rally-final',
- 'md5': 'ade3def0643fa1c40587a422f98edfd9',
- 'info_dict': {
- 'id': '2609989',
- 'ext': 'flv',
- 'title': 'SM veckan vinter, Örebro - Rally, final',
- 'duration': 4500,
- 'thumbnail': 're:^https?://.*[\.-]jpg$',
- 'age_limit': 0,
- },
- }, {
- 'url': 'http://www.oppetarkiv.se/video/1058509/rederiet-sasong-1-avsnitt-1-av-318',
- 'md5': 'c3101a17ce9634f4c1f9800f0746c187',
+ _TEST = {
+ 'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
+ 'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
'info_dict': {
- 'id': '1058509',
- 'ext': 'flv',
- 'title': 'Farlig kryssning',
- 'duration': 2566,
+ 'id': '5996901',
+ 'ext': 'mp4',
+ 'title': 'Flygplan till Haile Selassie',
+ 'duration': 3527,
'thumbnail': 're:^https?://.*[\.-]jpg$',
'age_limit': 0,
+ 'subtitles': {
+ 'sv': [{
+ 'ext': 'wsrt',
+ }]
+ },
},
- 'skip': 'Only works from Sweden',
- }]
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
'ext': 'mp4',
'title': 'A History of Teaming',
'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
+ 'duration': 422.255,
},
'params': {
# m3u8 download
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .youtube import YoutubeIE
+from ..utils import (
+ js_to_json,
+ qualities,
+ determine_ext,
+)
+
+
+class Tele13IE(InfoExtractor):
+ _VALID_URL = r'^http://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)'
+ _TESTS = [
+ {
+ 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
+ 'md5': '4cb1fa38adcad8fea88487a078831755',
+ 'info_dict': {
+ 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
+ 'ext': 'mp4',
+ 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda',
+ },
+ 'params': {
+ # HTTP Error 404: Not Found
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok',
+ 'md5': '867adf6a3b3fef932c68a71d70b70946',
+ 'info_dict': {
+ 'id': 'rOoKv2OMpOw',
+ 'ext': 'mp4',
+ 'title': 'Shooting star seen on 7-Sep-2015',
+ 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e',
+ 'uploader': 'Porjai Jaturongkhakun',
+ 'upload_date': '20150906',
+ 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw',
+ },
+ 'add_ie': ['Youtube'],
+ }
+ ]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ setup_js = self._search_regex(
+ r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)",
+ webpage, 'setup code')
+ sources = self._parse_json(self._search_regex(
+ r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'),
+ display_id, js_to_json)
+
+ preference = qualities(['Móvil', 'SD', 'HD'])
+ formats = []
+ urls = []
+ for f in sources:
+ format_url = f['file']
+ if format_url and format_url not in urls:
+ ext = determine_ext(format_url)
+ if ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ format_url, display_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+ elif YoutubeIE.suitable(format_url):
+ return self.url_result(format_url, 'Youtube')
+ else:
+ formats.append({
+ 'url': format_url,
+ 'format_id': f.get('label'),
+ 'preference': preference(f.get('label')),
+ 'ext': ext,
+ })
+ urls.append(format_url)
+ self._sort_formats(formats)
+
+ return {
+ 'id': display_id,
+ 'title': self._search_regex(
+ r'title\s*:\s*"([^"]+)"', setup_js, 'title'),
+ 'description': self._html_search_meta(
+ 'description', webpage, 'description'),
+ 'thumbnail': self._search_regex(
+ r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None),
+ 'formats': formats,
+ }
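The qualities(['Móvil', 'SD', 'HD']) call above returns a ranking function: a label's index in the list becomes its preference, so _sort_formats orders HD above SD above Móvil. A self-contained sketch of that idea (mirroring the utility's behavior as I understand it, not copied from utils.py):

def qualities(quality_ids):
    # Higher list index -> higher preference; unknown labels rank lowest (-1)
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

preference = qualities(['Móvil', 'SD', 'HD'])
assert preference('HD') > preference('SD') > preference('Móvil')
assert preference('240p') == -1  # anything unlisted sorts below known labels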
}
_video_fields = [
- "id", "name", "shortDescription", "longDescription", "creationDate",
- "publishedDate", "lastModifiedDate", "customFields", "videoStillURL",
- "thumbnailURL", "referenceId", "length", "playsTotal",
- "playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"]
+ 'id', 'name', 'shortDescription', 'longDescription', 'creationDate',
+ 'publishedDate', 'lastModifiedDate', 'customFields', 'videoStillURL',
+ 'thumbnailURL', 'referenceId', 'length', 'playsTotal',
+ 'playsTrailingWeek', 'renditions', 'captioning', 'startDate', 'endDate']
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
+++ /dev/null
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from ..utils import (
- int_or_none,
- qualities,
-)
-
-
-class TestTubeIE(InfoExtractor):
- _VALID_URL = r'https?://testtube\.com/[^/?#]+/(?P<id>[^/?#]+)'
- _TESTS = [{
- 'url': 'https://testtube.com/dnews/5-weird-ways-plants-can-eat-animals?utm_source=FB&utm_medium=DNews&utm_campaign=DNewsSocial',
- 'info_dict': {
- 'id': '60163',
- 'display_id': '5-weird-ways-plants-can-eat-animals',
- 'duration': 275,
- 'ext': 'webm',
- 'title': '5 Weird Ways Plants Can Eat Animals',
- 'description': 'Why have some plants evolved to eat meat?',
- 'thumbnail': 're:^https?://.*\.jpg$',
- 'uploader': 'DNews',
- 'uploader_id': 'dnews',
- },
- }, {
- 'url': 'https://testtube.com/iflscience/insane-jet-ski-flipping',
- 'info_dict': {
- 'id': 'fAGfJ4YjVus',
- 'ext': 'mp4',
- 'title': 'Flipping Jet-Ski Skills | Outrageous Acts of Science',
- 'uploader': 'Science Channel',
- 'uploader_id': 'ScienceChannel',
- 'upload_date': '20150203',
- 'description': 'md5:e61374030015bae1d2e22f096d4769d6',
- }
- }]
-
- def _real_extract(self, url):
- display_id = self._match_id(url)
-
- webpage = self._download_webpage(url, display_id)
-
- youtube_url = self._html_search_regex(
- r'<iframe[^>]+src="((?:https?:)?//www.youtube.com/embed/[^"]+)"',
- webpage, 'youtube iframe', default=None)
- if youtube_url:
- return self.url_result(youtube_url, 'Youtube', video_id=display_id)
-
- video_id = self._search_regex(
- r"player\.loadRevision3Item\('video_id',\s*([0-9]+)\);",
- webpage, 'video ID')
-
- all_info = self._download_json(
- 'https://testtube.com/api/getPlaylist.json?api_key=ba9c741bce1b9d8e3defcc22193f3651b8867e62&codecs=h264,vp8,theora&video_id=%s' % video_id,
- video_id)
- info = all_info['items'][0]
-
- formats = []
- for vcodec, fdatas in info['media'].items():
- for name, fdata in fdatas.items():
- formats.append({
- 'format_id': '%s-%s' % (vcodec, name),
- 'url': fdata['url'],
- 'vcodec': vcodec,
- 'tbr': fdata.get('bitrate'),
- })
- self._sort_formats(formats)
-
- duration = int_or_none(info.get('duration'))
- images = info.get('images')
- thumbnails = None
- preference = qualities(['mini', 'small', 'medium', 'large'])
- if images:
- thumbnails = [{
- 'id': thumbnail_id,
- 'url': img_url,
- 'preference': preference(thumbnail_id)
- } for thumbnail_id, img_url in images.items()]
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'title': info['title'],
- 'description': info.get('summary'),
- 'thumbnails': thumbnails,
- 'uploader': info.get('show', {}).get('name'),
- 'uploader_id': info.get('show', {}).get('slug'),
- 'duration': duration,
- 'formats': formats,
- }
class TestURLIE(InfoExtractor):
- """ Allows adressing of the test cases as test:yout.*be_1 """
+ """ Allows addressing of the test cases as test:yout.*be_1 """
IE_DESC = False # Do not list
_VALID_URL = r'test(?:url)?:(?P<id>(?P<extractor>.+?)(?:_(?P<num>[0-9]+))?)$'
class TF1IE(InfoExtractor):
"""TF1 uses the wat.tv player."""
- _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/.*?-(?P<id>\d+)(?:-\d+)?\.html'
+ _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/(?:[^/]+/)*(?P<id>.+?)\.html'
_TESTS = [{
'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
'info_dict': {
}, {
'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html',
'info_dict': {
- 'id': '12043945',
+ 'id': 'le-grand-mysterioso-chuggington-7085291-739',
'ext': 'mp4',
'title': 'Le grand Mystérioso - Chuggington',
'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.',
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
},
+ 'skip': 'HTTP Error 410: Gone',
}, {
'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
'only_matching': True,
}, {
'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html',
'only_matching': True,
+ }, {
+ 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
+ 'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- embed_url = self._html_search_regex(
- r'["\'](https?://www.wat.tv/embedframe/.*?)["\']', webpage, 'embed url')
- embed_page = self._download_webpage(embed_url, video_id,
- 'Downloading embed player page')
- wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
- wat_info = self._download_json(
- 'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
- return self.url_result(wat_info['media']['url'], 'Wat')
+ wat_id = self._html_search_regex(
+ r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1',
+ webpage, 'wat id', group='id')
+ return self.url_result('wat:%s' % wat_id, 'Wat')
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ parse_iso8601,
+ int_or_none,
+ ExtractorError,
+)
+
+
+class TheInterceptIE(InfoExtractor):
+ _VALID_URL = r'https://theintercept.com/fieldofvision/(?P<id>[^/?#]+)'
+ _TESTS = [{
+ 'url': 'https://theintercept.com/fieldofvision/thisisacoup-episode-four-surrender-or-die/',
+ 'md5': '145f28b41d44aab2f87c0a4ac8ec95bd',
+ 'info_dict': {
+ 'id': '46214',
+ 'ext': 'mp4',
+ 'title': '#ThisIsACoup – Episode Four: Surrender or Die',
+ 'description': 'md5:74dd27f0e2fbd50817829f97eaa33140',
+ 'timestamp': 1450429239,
+ 'upload_date': '20151218',
+ 'comment_count': int,
+ }
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ json_data = self._parse_json(self._search_regex(
+ r'initialStoreTree\s*=\s*(?P<json_data>{.+})', webpage,
+ 'initialStoreTree'), display_id)
+
+ for post in json_data['resources']['posts'].values():
+ if post['slug'] == display_id:
+ return {
+ '_type': 'url_transparent',
+ 'url': 'jwplatform:%s' % post['fov_videoid'],
+ 'id': compat_str(post['ID']),
+ 'display_id': display_id,
+ 'title': post['title'],
+ 'description': post.get('excerpt'),
+ 'timestamp': parse_iso8601(post.get('date')),
+ 'comment_count': int_or_none(post.get('comments_number')),
+ }
+ raise ExtractorError('Unable to find the current post')
from ..utils import (
determine_ext,
ExtractorError,
- xpath_with_ns,
- unsmuggle_url,
- int_or_none,
- url_basename,
float_or_none,
+ int_or_none,
+ sanitized_Request,
+ unsmuggle_url,
+ xpath_with_ns,
+ mimetype2ext,
)
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
for caption in captions:
lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
subtitles[lang] = [{
- 'ext': 'srt' if mime == 'text/srt' else 'ttml',
+ 'ext': mimetype2ext(mime),
'url': src,
}]
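Swapping the hardcoded srt/ttml mapping for mimetype2ext(mime) delegates the MIME-to-extension choice to a shared helper. A hypothetical stand-in sketch of such a helper, for illustration only (the real utility may cover more cases):

def mimetype2ext_sketch(mime):
    # Hypothetical stand-in for youtube_dl.utils.mimetype2ext: take the MIME
    # subtype and translate the cases where it is not the extension itself.
    subtype = mime.rpartition('/')[2]
    return {
        'ttml+xml': 'ttml',
    }.get(subtype, subtype)

assert mimetype2ext_sketch('text/srt') == 'srt'
assert mimetype2ext_sketch('application/ttml+xml') == 'ttml'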
class ThePlatformIE(ThePlatformBaseIE):
_VALID_URL = r'''(?x)
(?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
- (?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
+ (?:(?P<media>(?:(?:[^/]+/)+select/)?media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
|theplatform:)(?P<id>[^/\?&]+)'''
_TESTS = [{
smil_url = url
# Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
elif '/guid/' in url:
- webpage = self._download_webpage(url, video_id)
+ headers = {}
+ source_url = smuggled_data.get('source_url')
+ if source_url:
+ headers['Referer'] = source_url
+ request = sanitized_Request(url, headers=headers)
+ webpage = self._download_webpage(request, video_id)
smil_url = self._search_regex(
r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
webpage, 'smil url', group='url')
first_video_id = None
duration = None
for item in entry['media$content']:
- smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M'
- cur_video_id = url_basename(smil_url)
+ smil_url = item['plfile$url'] + '&format=SMIL&mbr=true'
+ cur_video_id = ThePlatformIE._match_id(smil_url)
if first_video_id is None:
first_video_id = cur_video_id
duration = float_or_none(item.get('plfile$duration'))
]
_DECODE_MAP = {
- "x": "a",
- "m": "b",
- "w": "c",
- "q": "d",
- "n": "e",
- "p": "f",
- "a": "0",
- "h": "1",
- "e": "2",
- "u": "3",
- "s": "4",
- "i": "5",
- "o": "6",
- "y": "7",
- "r": "8",
- "c": "9"
+ 'x': 'a',
+ 'm': 'b',
+ 'w': 'c',
+ 'q': 'd',
+ 'n': 'e',
+ 'p': 'f',
+ 'a': '0',
+ 'h': '1',
+ 'e': '2',
+ 'u': '3',
+ 's': '4',
+ 'i': '5',
+ 'o': '6',
+ 'y': '7',
+ 'r': '8',
+ 'c': '9'
}
def _real_extract(self, url):
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
-from .discovery import DiscoveryIE
from ..compat import compat_urlparse
-class TlcIE(DiscoveryIE):
- IE_NAME = 'tlc.com'
- _VALID_URL = r'http://www\.tlc\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
-
- # DiscoveryIE has _TESTS
- _TESTS = [{
- 'url': 'http://www.tlc.com/tv-shows/cake-boss/videos/too-big-to-fly.htm',
- 'info_dict': {
- 'id': '104493',
- 'ext': 'mp4',
- 'title': 'Too Big to Fly',
- 'description': 'Buddy has taken on a high flying task.',
- 'duration': 119,
- 'timestamp': 1393365060,
- 'upload_date': '20140225',
- },
- 'params': {
- 'skip_download': True, # requires ffmpeg
- },
- }]
-
-
class TlcDeIE(InfoExtractor):
IE_NAME = 'tlc.de'
_VALID_URL = r'http://www\.tlc\.de/sendungen/[^/]+/videos/(?P<title>[^/?]+)'
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ ExtractorError,
+ float_or_none,
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
+)
+
+
+class ToggleIE(InfoExtractor):
+ IE_NAME = 'toggle'
+ _VALID_URL = r'https?://video\.toggle\.sg/(?:en|zh)/(?:series|clips|movies)/(?:[^/]+/)+(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://video.toggle.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
+ 'info_dict': {
+ 'id': '343115',
+ 'ext': 'mp4',
+ 'title': 'Lion Moms Premiere',
+ 'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b',
+ 'upload_date': '20150910',
+ 'timestamp': 1441858274,
+ },
+ 'params': {
+ 'skip_download': 'm3u8 download',
+ }
+ }, {
+ 'note': 'DRM-protected video',
+ 'url': 'http://video.toggle.sg/en/movies/dug-s-special-mission/341413',
+ 'info_dict': {
+ 'id': '341413',
+ 'ext': 'wvm',
+ 'title': 'Dug\'s Special Mission',
+ 'description': 'md5:e86c6f4458214905c1772398fabc93e0',
+ 'upload_date': '20150827',
+ 'timestamp': 1440644006,
+ },
+ 'params': {
+ 'skip_download': 'DRM-protected wvm download',
+ }
+ }, {
+ # this also tests correct video id extraction
+ 'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay',
+ 'url': 'http://video.toggle.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861',
+ 'info_dict': {
+ 'id': '332861',
+ 'ext': 'mp4',
+ 'title': '28th SEA Games (5 Show) - Episode 11',
+ 'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa',
+ 'upload_date': '20150605',
+ 'timestamp': 1433480166,
+ },
+ 'params': {
+ 'skip_download': 'm3u8 download',
+ },
+ 'skip': 'm3u8 links are geo-restricted'
+ }, {
+ 'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/zh/series/zero-calling-s2-hd/ep13/336367',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/en/movies/seven-days/321936',
+ 'only_matching': True,
+ }]
+
+ _FORMAT_PREFERENCES = {
+ 'wvm-STBMain': -10,
+ 'wvm-iPadMain': -20,
+ 'wvm-iPhoneMain': -30,
+ 'wvm-Android': -40,
+ }
+ _API_USER = 'tvpapi_147'
+ _API_PASS = '11111'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(
+ url, video_id, note='Downloading video page')
+
+ api_user = self._search_regex(
+ r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser',
+ default=self._API_USER, group='user')
+ api_pass = self._search_regex(
+ r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass',
+ default=self._API_PASS, group='pass')
+
+ params = {
+ 'initObj': {
+ 'Locale': {
+ 'LocaleLanguage': '',
+ 'LocaleCountry': '',
+ 'LocaleDevice': '',
+ 'LocaleUserState': 0
+ },
+ 'Platform': 0,
+ 'SiteGuid': 0,
+ 'DomainID': '0',
+ 'UDID': '',
+ 'ApiUser': api_user,
+ 'ApiPass': api_pass
+ },
+ 'MediaID': video_id,
+ 'mediaType': 0,
+ }
+
+ req = sanitized_Request(
+ 'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
+ json.dumps(params).encode('utf-8'))
+ info = self._download_json(req, video_id, 'Downloading video info json')
+
+ title = info['MediaName']
+
+ formats = []
+ for video_file in info.get('Files', []):
+ video_url, vid_format = video_file.get('URL'), video_file.get('Format')
+ if not video_url or not vid_format:
+ continue
+ ext = determine_ext(video_url)
+ vid_format = vid_format.replace(' ', '')
+ # if geo-restricted, m3u8 is inaccessible, but mp4 is okay
+ if ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ video_url, video_id, ext='mp4', m3u8_id=vid_format,
+ note='Downloading %s m3u8 information' % vid_format,
+ errnote='Failed to download %s m3u8 information' % vid_format,
+ fatal=False))
+ elif ext in ('mp4', 'wvm'):
+ # wvm files are DRM-protected
+ formats.append({
+ 'ext': ext,
+ 'url': video_url,
+ 'format_id': vid_format,
+ 'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1,
+ 'format_note': 'DRM-protected video' if ext == 'wvm' else None
+ })
+ if not formats:
+ # Most likely because geo-blocked
+ raise ExtractorError('No downloadable videos found', expected=True)
+ self._sort_formats(formats)
+
+ duration = int_or_none(info.get('Duration'))
+ description = info.get('Description')
+ created_at = parse_iso8601(info.get('CreationDate') or None)
+
+ average_rating = float_or_none(info.get('Rating'))
+ view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter'))
+ like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter'))
+
+ thumbnails = []
+ for picture in info.get('Pictures', []):
+ if not isinstance(picture, dict):
+ continue
+ pic_url = picture.get('URL')
+ if not pic_url:
+ continue
+ thumbnail = {
+ 'url': pic_url,
+ }
+ pic_size = picture.get('PicSize', '')
+ m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size)
+ if m:
+ thumbnail.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ thumbnails.append(thumbnail)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': created_at,
+ 'average_rating': average_rating,
+ 'view_count': view_count,
+ 'like_count': like_count,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
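The preference lookup above deliberately ranks every DRM-protected wvm variant below the plain formats. A tiny sketch of how the table interacts with the fallback (labels reuse _FORMAT_PREFERENCES from the hunk above):

_FORMAT_PREFERENCES = {
    'wvm-STBMain': -10,
    'wvm-iPadMain': -20,
    'wvm-iPhoneMain': -30,
    'wvm-Android': -40,
}

def format_preference(ext, vid_format):
    # Unlisted combinations (e.g. plain mp4) fall back to -1, which still
    # outranks every DRM-protected wvm entry in the table.
    return _FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1

assert format_preference('mp4', 'Android') == -1
assert format_preference('wvm', 'Android') == -40
assert format_preference('mp4', 'Android') > format_preference('wvm', 'STBMain')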
# Presence of (no)watchplus function indicates HD quality is available
if re.search(r'function (no)?watchplus()', webpage):
- fvar = "fvarhd"
+ fvar = 'fvarhd'
else:
- fvar = "fvar"
+ fvar = 'fvar'
- info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
- info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
+ info_url = 'http://www.traileraddict.com/%s.php?tid=%s' % (fvar, str(video_id))
+ info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage')
final_url = self._search_regex(r'&fileurl=(.+)',
info_webpage, 'Download url').replace('%3F', '?')
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+)
class TriluliluIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/(?:video-[^/]+/)?(?P<id>[^/#\?]+)'
- _TEST = {
- 'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1',
- 'md5': 'c1450a00da251e2769b74b9005601cac',
+ _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)'
+ _TESTS = [{
+ 'url': 'http://www.trilulilu.ro/big-buck-bunny-1',
+ 'md5': '68da087b676a6196a413549212f60cc6',
'info_dict': {
'id': 'ae2899e124140b',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': ':) pentru copilul din noi',
+ 'uploader_id': 'chipy',
+ 'upload_date': '20120304',
+ 'timestamp': 1330830647,
+ 'uploader': 'chipy',
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
},
- }
+ }, {
+ 'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta',
+ 'md5': '929dfb8729dc71750463af88bbbbf4a4',
+ 'info_dict': {
+ 'id': 'f299710e3c91c5',
+ 'ext': 'mp4',
+ 'title': 'Adena ft. Morreti - Inocenta',
+ 'description': 'pop music',
+ 'uploader_id': 'VEVOmixt',
+ 'upload_date': '20151204',
+ 'uploader': 'VEVOmixt',
+ 'timestamp': 1449187937,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ }]
def _real_extract(self, url):
display_id = self._match_id(url)
- webpage = self._download_webpage(url, display_id)
+ media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id)
- if re.search(r'Fişierul nu este disponibil pentru vizionare în ţara dumneavoastră', webpage):
- raise ExtractorError(
- 'This video is not available in your country.', expected=True)
- elif re.search('Fişierul poate fi accesat doar de către prietenii lui', webpage):
+ age_limit = 0
+ errors = media_info.get('errors', {})
+ if errors.get('friends'):
raise ExtractorError('This video is private.', expected=True)
+ elif errors.get('geoblock'):
+ raise ExtractorError('This video is not available in your country.', expected=True)
+ elif errors.get('xxx_unlogged'):
+ age_limit = 18
- flashvars_str = self._search_regex(
- r'block_flash_vars\s*=\s*(\{[^\}]+\})', webpage, 'flashvars', fatal=False, default=None)
+ media_class = media_info.get('class')
+ if media_class not in ('video', 'audio'):
+ raise ExtractorError('not a video or audio file')
- if flashvars_str:
- flashvars = self._parse_json(flashvars_str, display_id)
- else:
- raise ExtractorError(
- 'This page does not contain videos', expected=True)
+ user = media_info.get('user', {})
- if flashvars['isMP3'] == 'true':
- raise ExtractorError(
- 'Audio downloads are currently not supported', expected=True)
+ thumbnail = media_info.get('cover_url')
+ if thumbnail:
+ thumbnail = thumbnail.format(width='1600', height='1200')
- video_id = flashvars['hash']
- title = self._og_search_title(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
- description = self._og_search_description(webpage, default=None)
-
- format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/'
- 'video-formats2' % flashvars)
- format_doc = self._download_xml(
- format_url, video_id,
- note='Downloading formats',
- errnote='Error while downloading formats')
-
- video_url_template = (
- 'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
- '&source=site&hash=%(hash)s&username=%(userid)s&'
- 'key=ministhebest&format=%%s&sig=&exp=' %
- flashvars)
- formats = [
- {
- 'format_id': fnode.text.partition('-')[2],
- 'url': video_url_template % fnode.text,
- 'ext': fnode.text.partition('-')[0]
- }
-
- for fnode in format_doc.findall('./formats/format')
- ]
+ # TODO: get correct ext for audio files
+ stream_type = media_info.get('stream_type')
+ formats = [{
+ 'url': media_info['href'],
+ 'ext': stream_type,
+ }]
+ if media_info.get('is_hd'):
+ formats.append({
+ 'format_id': 'hd',
+ 'url': media_info['hrefhd'],
+ 'ext': stream_type,
+ })
+ if media_class == 'audio':
+ formats[0]['vcodec'] = 'none'
+ else:
+ formats[0]['format_id'] = 'sd'
return {
- 'id': video_id,
+ 'id': media_info['identifier'].split('|')[1],
'display_id': display_id,
'formats': formats,
- 'title': title,
- 'description': description,
+ 'title': media_info['title'],
+ 'description': media_info.get('description'),
'thumbnail': thumbnail,
+ 'uploader_id': user.get('username'),
+ 'uploader': user.get('fullname'),
+ 'timestamp': parse_iso8601(media_info.get('published'), ' '),
+ 'duration': int_or_none(media_info.get('duration')),
+ 'view_count': int_or_none(media_info.get('count_views')),
+ 'like_count': int_or_none(media_info.get('count_likes')),
+ 'comment_count': int_or_none(media_info.get('count_comments')),
+ 'age_limit': age_limit,
}
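parse_iso8601(..., ' ') above passes a space as the date/time separator because the mobile API apparently returns timestamps like '2012-03-04 03:50:47' rather than strict ISO 8601. A sketch of equivalent parsing for naive UTC values (sample string assumed):

import calendar
from datetime import datetime

def parse_space_delimited_iso8601(date_str):
    # Equivalent in spirit to parse_iso8601(date_str, ' ') for naive UTC values
    dt = datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
    return calendar.timegm(dt.timetuple())

assert parse_space_delimited_iso8601('2012-03-04 03:50:47') == 1330833047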
--- /dev/null
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .nuevo import NuevoBaseIE
+
+
+class TrollvidsIE(NuevoBaseIE):
+ _VALID_URL = r'http://(?:www\.)?trollvids\.com/video/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
+ IE_NAME = 'trollvids'
+ _TEST = {
+ 'url': 'http://trollvids.com/video/2349002/%E3%80%90MMD-R-18%E3%80%91%E3%82%AC%E3%83%BC%E3%83%AB%E3%83%95%E3%83%AC%E3%83%B3%E3%83%89-carrymeoff',
+ 'md5': '1d53866b2c514b23ed69e4352fdc9839',
+ 'info_dict': {
+ 'id': '2349002',
+ 'ext': 'mp4',
+ 'title': '【MMD R-18】ガールフレンド carry_me_off',
+ 'age_limit': 18,
+ 'duration': 216.78,
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ info = self._extract_nuevo(
+ 'http://trollvids.com/nuevo/player/config.php?v=%s' % video_id,
+ video_id)
+ info.update({
+ 'display_id': display_id,
+ 'age_limit': 18
+ })
+ return info
from __future__ import unicode_literals
-from .common import InfoExtractor
-from ..utils import xpath_text
+from .nuevo import NuevoBaseIE
-class TruTubeIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
+class TruTubeIE(NuevoBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
def _real_extract(self, url):
video_id = self._match_id(url)
-
- config = self._download_xml(
+ return self._extract_nuevo(
'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
- video_id, transform_source=lambda s: s.strip())
-
- # filehd is always 404
- video_url = xpath_text(config, './file', 'video URL', fatal=True)
- title = xpath_text(config, './title', 'title').strip()
- thumbnail = xpath_text(config, './image', ' thumbnail')
-
- return {
- 'id': video_id,
- 'url': video_url,
- 'title': title,
- 'thumbnail': thumbnail,
- }
+ video_id)
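TruTube now delegates to the shared NuevoBaseIE._extract_nuevo helper. A hypothetical stand-in for that helper, inferred from the fields the deleted code read (file/title/image); the real nuevo.py may differ:

import xml.etree.ElementTree as ET

# Trimmed, hypothetical Nuevo player config; field names mirror those the
# deleted TruTube code read, not a verified nuevo.py response.
SAMPLE_CONFIG = '''<config>
  <title>Sample title </title>
  <file>http://example.com/video.mp4</file>
  <image>http://example.com/thumb.jpg</image>
  <duration>216.78</duration>
</config>'''

def extract_nuevo(config_xml, video_id):
    config = ET.fromstring(config_xml.strip())
    return {
        'id': video_id,
        'title': config.findtext('title').strip(),
        'url': config.findtext('file'),
        'thumbnail': config.findtext('image'),
        'duration': float(config.findtext('duration')),
    }

print(extract_nuevo(SAMPLE_CONFIG, '14880')['title'])  # Sample title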
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlparse
+from ..compat import compat_str
from ..utils import (
int_or_none,
sanitized_Request,
class Tube8IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
- _TESTS = [
- {
- 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
- 'md5': '44bf12b98313827dd52d35b8706a4ea0',
- 'info_dict': {
- 'id': '229795',
- 'display_id': 'kasia-music-video',
- 'ext': 'mp4',
- 'description': 'hot teen Kasia grinding',
- 'uploader': 'unknown',
- 'title': 'Kasia music video',
- 'age_limit': 18,
- }
- },
- {
- 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
- 'only_matching': True,
- },
- ]
+ _TESTS = [{
+ 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
+ 'md5': '65e20c48e6abff62ed0c3965fff13a39',
+ 'info_dict': {
+ 'id': '229795',
+ 'display_id': 'kasia-music-video',
+ 'ext': 'mp4',
+ 'description': 'hot teen Kasia grinding',
+ 'uploader': 'unknown',
+ 'title': 'Kasia music video',
+ 'age_limit': 18,
+ 'duration': 230,
+ }
+ }, {
+ 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, display_id)
- flashvars = json.loads(self._html_search_regex(
- r'flashvars\s*=\s*({.+?});\r?\n', webpage, 'flashvars'))
+ flashvars = self._parse_json(
+ self._search_regex(
+ r'flashvars\s*=\s*({.+?});\r?\n', webpage, 'flashvars'),
+ video_id)
- video_url = flashvars['video_url']
- if flashvars.get('encrypted') is True:
- video_url = aes_decrypt_text(video_url, flashvars['video_title'], 32).decode('utf-8')
- path = compat_urllib_parse_urlparse(video_url).path
- format_id = '-'.join(path.split('/')[4].split('_')[:2])
+ formats = []
+ for key, video_url in flashvars.items():
+ if not isinstance(video_url, compat_str) or not video_url.startswith('http'):
+ continue
+ height = self._search_regex(
+ r'quality_(\d+)[pP]', key, 'height', default=None)
+ if not height:
+ continue
+ if flashvars.get('encrypted') is True:
+ video_url = aes_decrypt_text(
+ video_url, flashvars['video_title'], 32).decode('utf-8')
+ formats.append({
+ 'url': video_url,
+ 'format_id': '%sp' % height,
+ 'height': int(height),
+ })
+ self._sort_formats(formats)
thumbnail = flashvars.get('image_url')
uploader = self._html_search_regex(
r'<span class="username">\s*(.+?)\s*<',
webpage, 'uploader', fatal=False)
+ duration = int_or_none(flashvars.get('video_duration'))
- like_count = int_or_none(self._html_search_regex(
+ like_count = int_or_none(self._search_regex(
r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
- dislike_count = int_or_none(self._html_search_regex(
+ dislike_count = int_or_none(self._search_regex(
r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
- view_count = self._html_search_regex(
- r'<strong>Views: </strong>([\d,\.]+)\s*</li>', webpage, 'view count', fatal=False)
- if view_count:
- view_count = str_to_int(view_count)
- comment_count = self._html_search_regex(
- r'<span id="allCommentsCount">(\d+)</span>', webpage, 'comment count', fatal=False)
- if comment_count:
- comment_count = str_to_int(comment_count)
+ view_count = str_to_int(self._search_regex(
+ r'<strong>Views: </strong>([\d,\.]+)\s*</li>',
+ webpage, 'view count', fatal=False))
+ comment_count = str_to_int(self._search_regex(
+ r'<span id="allCommentsCount">(\d+)</span>',
+ webpage, 'comment count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
- 'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
- 'format_id': format_id,
+ 'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'age_limit': 18,
+ 'formats': formats,
}
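The rewritten Tube8 loop discovers formats by scanning flashvars for quality_<height>p keys instead of deriving a single format from the URL path. A standalone sketch with a made-up payload:

import re

# Hypothetical flashvars payload; key names mirror the pattern scanned above.
flashvars = {
    'quality_180p': 'http://cdn.example.com/180.mp4',
    'quality_720P': 'http://cdn.example.com/720.mp4',
    'video_title': 'sample',
    'encrypted': False,
}

formats = []
for key, video_url in flashvars.items():
    if not isinstance(video_url, str) or not video_url.startswith('http'):
        continue
    m = re.search(r'quality_(\d+)[pP]', key)
    if not m:
        continue
    formats.append({
        'url': video_url,
        'format_id': '%sp' % m.group(1),
        'height': int(m.group(1)),
    })

print(sorted(f['height'] for f in formats))  # [180, 720]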
from .common import InfoExtractor
from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ float_or_none,
+ unescapeHTML,
+)
class TudouIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/([^/]+/)*(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
+ IE_NAME = 'tudou'
+ _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:(?:programs|wlplay)/view|(?:listplay|albumplay)/[\w-]{11})/(?P<id>[\w-]{11})'
_TESTS = [{
'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
'md5': '140a49ed444bd22f93330985d8475fcb',
'ext': 'f4v',
'title': '卡马乔国足开大脚长传冲吊集锦',
'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1372113489000,
+ 'description': '卡马乔卡家军,开大脚先进战术不完全集锦!',
+ 'duration': 289.04,
+ 'view_count': int,
+ 'filesize': int,
}
}, {
'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
'ext': 'f4v',
'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1349207518000,
+ 'description': 'md5:294612423894260f2dcd5c6c04fe248b',
+ 'duration': 5478.33,
+ 'view_count': int,
+ 'filesize': int,
}
- }, {
- 'url': 'http://www.tudou.com/albumplay/cJAHGih4yYg.html',
- 'only_matching': True,
}]
_PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf'
info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
if quality:
info_url += '&hd' + quality
- xml_data = self._download_xml(info_url, video_id, "Opening the info XML page")
+ xml_data = self._download_xml(info_url, video_id, 'Opening the info XML page')
final_url = xml_data.text
return final_url
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ item_data = self._download_json(
+ 'http://www.tudou.com/tvp/getItemInfo.action?ic=%s' % video_id, video_id)
- youku_vcode = self._search_regex(
- r'vcode\s*:\s*[\'"]([^\'"]*)[\'"]', webpage, 'youku vcode', default=None)
+ youku_vcode = item_data.get('vcode')
if youku_vcode:
return self.url_result('youku:' + youku_vcode, ie='Youku')
- title = self._search_regex(
- r',kw\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'title')
- thumbnail_url = self._search_regex(
- r',pic\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'thumbnail URL', fatal=False)
-
- player_url = self._search_regex(
- r'playerUrl\s*:\s*[\'"]([^\'"]+\.swf)[\'"]',
- webpage, 'player URL', default=self._PLAYER_URL)
+ title = unescapeHTML(item_data['kw'])
+ description = item_data.get('desc')
+ thumbnail_url = item_data.get('pic')
+ view_count = int_or_none(item_data.get('playTimes'))
+ timestamp = int_or_none(item_data.get('pt'))
- segments = self._parse_json(self._search_regex(
- r'segs: \'([^\']+)\'', webpage, 'segments'), video_id)
+ segments = self._parse_json(item_data['itemSegs'], video_id)
# It looks like the keys are the arguments that have to be passed as
# the hd field in the request url, we pick the higher
# Also, filter non-number qualities (see issue #3643).
'ext': ext,
'title': title,
'thumbnail': thumbnail_url,
+ 'description': description,
+ 'view_count': view_count,
+ 'timestamp': timestamp,
+ 'duration': float_or_none(part.get('seconds'), 1000),
+ 'filesize': int_or_none(part.get('size')),
'http_headers': {
- 'Referer': player_url,
+ 'Referer': self._PLAYER_URL,
},
}
result.append(part_info)
'id': video_id,
'title': title,
}
+
+
+class TudouPlaylistIE(InfoExtractor):
+ IE_NAME = 'tudou:playlist'
+ _VALID_URL = r'https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html'
+ _TESTS = [{
+ 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo.html',
+ 'info_dict': {
+ 'id': 'zzdE77v6Mmo',
+ },
+ 'playlist_mincount': 209,
+ }]
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+ playlist_data = self._download_json(
+ 'http://www.tudou.com/tvp/plist.action?lcode=%s' % playlist_id, playlist_id)
+ entries = [self.url_result(
+ 'http://www.tudou.com/programs/view/%s' % item['icode'],
+ 'Tudou', item['icode'],
+ item['kw']) for item in playlist_data['items']]
+ return self.playlist_result(entries, playlist_id)
+
+
+class TudouAlbumIE(InfoExtractor):
+ IE_NAME = 'tudou:album'
+ _VALID_URL = r'https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})'
+ _TESTS = [{
+ 'url': 'http://www.tudou.com/albumplay/v5qckFJvNJg.html',
+ 'info_dict': {
+ 'id': 'v5qckFJvNJg',
+ },
+ 'playlist_mincount': 45,
+ }]
+
+ def _real_extract(self, url):
+ album_id = self._match_id(url)
+ album_data = self._download_json(
+ 'http://www.tudou.com/tvp/alist.action?acode=%s' % album_id, album_id)
+ entries = [self.url_result(
+ 'http://www.tudou.com/programs/view/%s' % item['icode'],
+ 'Tudou', item['icode'],
+ item['kw']) for item in album_data['items']]
+ return self.playlist_result(entries, album_id)
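The "pick the higher" comment carried over in the Tudou hunk refers to itemSegs being keyed by quality. A sketch of the numeric-key selection (payload shape assumed):

# Hypothetical itemSegs payload: quality keys map to lists of segment dicts.
segments = {
    '1': [{'k': 'seg-sd-1', 'size': 1000}],
    '3': [{'k': 'seg-hd-1', 'size': 3000}],
    'real': [],  # non-numeric quality keys are filtered out (issue #3643)
}
# Pick the highest numeric quality, as the comment describes
quality = max((q for q in segments if q.isdigit()), key=int)
parts = segments[quality]
print(quality, [p['k'] for p in parts])  # 3 ['seg-hd-1']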
from __future__ import unicode_literals
import json
-import re
from .common import InfoExtractor
from ..utils import ExtractorError
+from ..compat import compat_urlparse
-class TuneInIE(InfoExtractor):
- _VALID_URL = r'''(?x)https?://(?:www\.)?
- (?:
- tunein\.com/
- (?:
- radio/.*?-s|
- station/.*?StationId\=
- )(?P<id>[0-9]+)
- |tun\.in/(?P<redirect_id>[A-Za-z0-9]+)
- )
- '''
- _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station'
-
- _INFO_DICT = {
- 'id': '34682',
- 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
- 'ext': 'aac',
- 'thumbnail': 're:^https?://.*\.png$',
- 'location': 'Tacoma, WA',
- }
- _TESTS = [
- {
- 'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
- 'info_dict': _INFO_DICT,
- 'params': {
- 'skip_download': True, # live stream
- },
- },
- { # test redirection
- 'url': 'http://tun.in/ser7s',
- 'info_dict': _INFO_DICT,
- 'params': {
- 'skip_download': True, # live stream
- },
- },
- ]
+class TuneInBaseIE(InfoExtractor):
+ _API_BASE_URL = 'http://tunein.com/tuner/tune/'
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- redirect_id = mobj.group('redirect_id')
- if redirect_id:
- # The server doesn't support HEAD requests
- urlh = self._request_webpage(
- url, redirect_id, note='Downloading redirect page')
- url = urlh.geturl()
- self.to_screen('Following redirect: %s' % url)
- mobj = re.match(self._VALID_URL, url)
- station_id = mobj.group('id')
-
- station_info = self._download_json(
- self._API_URL_TEMPLATE.format(station_id),
- station_id, note='Downloading station JSON')
-
- title = station_info['Title']
- thumbnail = station_info.get('Logo')
- location = station_info.get('Location')
- streams_url = station_info.get('StreamUrl')
+ content_id = self._match_id(url)
+
+ content_info = self._download_json(
+ self._API_BASE_URL + self._API_URL_QUERY % content_id,
+ content_id, note='Downloading JSON metadata')
+
+ title = content_info['Title']
+ thumbnail = content_info.get('Logo')
+ location = content_info.get('Location')
+ streams_url = content_info.get('StreamUrl')
if not streams_url:
- raise ExtractorError('No downloadable streams found',
- expected=True)
+ raise ExtractorError('No downloadable streams found', expected=True)
+ if not streams_url.startswith('http://'):
+ streams_url = compat_urlparse.urljoin(url, streams_url)
+
stream_data = self._download_webpage(
- streams_url, station_id, note='Downloading stream data')
+ streams_url, content_id, note='Downloading stream data')
streams = json.loads(self._search_regex(
r'\((.*)\);', stream_data, 'stream info'))['Streams']
self._sort_formats(formats)
return {
- 'id': station_id,
+ 'id': content_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'location': location,
'is_live': is_live,
}
+
+
+class TuneInClipIE(TuneInBaseIE):
+ IE_NAME = 'tunein:clip'
+ _VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)'
+ _API_URL_QUERY = '?tuneType=AudioClip&audioclipId=%s'
+
+ _TESTS = [
+ {
+ 'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816',
+ 'md5': '99f00d772db70efc804385c6b47f4e77',
+ 'info_dict': {
+ 'id': '816',
+ 'title': '32m',
+ 'ext': 'mp3',
+ },
+ },
+ ]
+
+
+class TuneInStationIE(TuneInBaseIE):
+ IE_NAME = 'tunein:station'
+ _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId\=)(?P<id>\d+)'
+ _API_URL_QUERY = '?tuneType=Station&stationId=%s'
+
+ @classmethod
+ def suitable(cls, url):
+ return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url)
+
+ _TESTS = [
+ {
+ 'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
+ 'info_dict': {
+ 'id': '34682',
+ 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
+ 'ext': 'mp3',
+ 'location': 'Tacoma, WA',
+ },
+ 'params': {
+ 'skip_download': True, # live stream
+ },
+ },
+ ]
+
+
+class TuneInProgramIE(TuneInBaseIE):
+ IE_NAME = 'tunein:program'
+ _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId\=)(?P<id>\d+)'
+ _API_URL_QUERY = '?tuneType=Program&programId=%s'
+
+ _TESTS = [
+ {
+ 'url': 'http://tunein.com/radio/Jazz-24-p2506/',
+ 'info_dict': {
+ 'id': '2506',
+ 'title': 'Jazz 24 on 91.3 WUKY-HD3',
+ 'ext': 'mp3',
+ 'location': 'Lexington, KY',
+ },
+ 'params': {
+ 'skip_download': True, # live stream
+ },
+ },
+ ]
+
+
+class TuneInTopicIE(TuneInBaseIE):
+ IE_NAME = 'tunein:topic'
+ _VALID_URL = r'https?://(?:www\.)?tunein\.com/topic/.*?TopicId\=(?P<id>\d+)'
+ _API_URL_QUERY = '?tuneType=Topic&topicId=%s'
+
+ _TESTS = [
+ {
+ 'url': 'http://tunein.com/topic/?TopicId=101830576',
+ 'md5': 'c31a39e6f988d188252eae7af0ef09c9',
+ 'info_dict': {
+ 'id': '101830576',
+ 'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)',
+ 'ext': 'mp3',
+ 'location': 'Belgium',
+ },
+ },
+ ]
+
+
+class TuneInShortenerIE(InfoExtractor):
+ IE_NAME = 'tunein:shortener'
+ IE_DESC = False # Do not list
+ _VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)'
+
+ _TEST = {
+ # test redirection
+ 'url': 'http://tun.in/ser7s',
+ 'info_dict': {
+ 'id': '34682',
+ 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
+ 'ext': 'mp3',
+ 'location': 'Tacoma, WA',
+ },
+ 'params': {
+ 'skip_download': True, # live stream
+ },
+ }
+
+ def _real_extract(self, url):
+ redirect_id = self._match_id(url)
+ # The server doesn't support HEAD requests
+ urlh = self._request_webpage(
+ url, redirect_id, note='Downloading redirect page')
+ url = urlh.geturl()
+ self.to_screen('Following redirect: %s' % url)
+ return self.url_result(url)
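TuneInStationIE.suitable defers to TuneInClipIE because a station page URL can also carry an audioClipId parameter; the clip extractor must win in that case. A minimal sketch of the dispatch rule using the two patterns above:

import re

CLIP_RE = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)'
STATION_RE = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId\=)(?P<id>\d+)'

def pick_extractor(url):
    # Clip wins whenever its pattern matches, mirroring suitable() above
    if re.match(CLIP_RE, url):
        return 'tunein:clip'
    if re.match(STATION_RE, url):
        return 'tunein:station'
    return None

print(pick_extractor('http://tunein.com/station/?stationId=246119&audioClipId=816'))
# tunein:clip
print(pick_extractor('http://tunein.com/radio/Jazz24-885-s34682/'))
# tunein:station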
_VALID_URL = 'http://(?:www\.)?tv2\.no/v/(?P<id>\d+)'
_TEST = {
'url': 'http://www.tv2.no/v/916509/',
- 'md5': '9cb9e3410b18b515d71892f27856e9b1',
'info_dict': {
'id': '916509',
- 'ext': 'flv',
- 'title': 'Se Gryttens hyllest av Steven Gerrard',
+ 'ext': 'mp4',
+ 'title': 'Se Frode Gryttens hyllest av Steven Gerrard',
'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.',
'timestamp': 1431715610,
'upload_date': '20150515',
'duration': 156.967,
'view_count': int,
'categories': list,
- }
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}
def _real_extract(self, url):
info = self._download_json(
'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON')
- # If is_geo_restricted is true, it doesn't neceserally mean we can't download it
+ # If is_geo_restricted is true, it doesn't necessarily mean we can't download it
if info['is_geo_restricted']:
self.report_warning('This content might not be available in your country due to licensing restrictions.')
if info['requires_subscription']:
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .mtv import MTVServicesInfoExtractor
+
+
+class TVLandIE(MTVServicesInfoExtractor):
+ IE_NAME = 'tvland.com'
+ _VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|episodes)/(?P<id>[^/?#.]+)'
+ _FEED_URL = 'http://www.tvland.com/feeds/mrss/'
+ _TESTS = [{
+ 'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
+ 'playlist': [
+ {
+ 'md5': '227e9723b9669c05bf51098b10287aa7',
+ 'info_dict': {
+ 'id': 'bcbd3a83-3aca-4dca-809b-f78a87dcccdd',
+ 'ext': 'mp4',
+ 'title': 'Everybody Loves Raymond|Everybody Loves Raymond 048 HD, Part 1 of 5',
+ }
+ },
+ {
+ 'md5': '9fa2b764ec0e8194fb3ebb01a83df88b',
+ 'info_dict': {
+ 'id': 'f4279548-6e13-40dd-92e8-860d27289197',
+ 'ext': 'mp4',
+ 'title': 'Everybody Loves Raymond|Everybody Loves Raymond 048 HD, Part 2 of 5',
+ }
+ },
+ {
+ 'md5': 'fde4c3bccd7cc7e3576b338734153cec',
+ 'info_dict': {
+ 'id': '664e4a38-53ef-4115-9bc9-d0f789ec6334',
+ 'ext': 'mp4',
+ 'title': 'Everybody Loves Raymond|Everybody Loves Raymond 048 HD, Part 3 of 5',
+ }
+ },
+ {
+ 'md5': '247f6780cda6891f2e49b8ae2b10e017',
+ 'info_dict': {
+ 'id': '9146ecf5-b15a-4d78-879c-6679b77f4960',
+ 'ext': 'mp4',
+ 'title': 'Everybody Loves Raymond|Everybody Loves Raymond 048 HD, Part 4 of 5',
+ }
+ },
+ {
+ 'md5': 'fd269f33256e47bad5eb6c40de089ff6',
+ 'info_dict': {
+ 'id': '04334a2e-9a47-4214-a8c2-ae5792e2fab7',
+ 'ext': 'mp4',
+ 'title': 'Everybody Loves Raymond|Everybody Loves Raymond 048 HD, Part 5 of 5',
+ }
+ }
+ ],
+ }, {
+ 'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
+ 'md5': 'e2c6389401cf485df26c79c247b08713',
+ 'info_dict': {
+ 'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
+ 'ext': 'mp4',
+ 'title': 'Younger|Younger: Hilary Duff - Little Lies',
+ 'description': 'md5:7d192f56ca8d958645c83f0de8ef0269'
+ },
+ }]
from ..utils import (
parse_iso8601,
int_or_none,
+ xpath_attr,
+ xpath_element,
)
_TESTS = [
{
'url': 'http://www.24video.net/video/view/1044982',
- 'md5': 'd041af8b5b4246ea466226a0d6693345',
+ 'md5': 'e09fc0901d9eaeedac872f154931deeb',
'info_dict': {
'id': '1044982',
'ext': 'mp4',
r'<div class="comments-title" id="comments-count">(\d+) комментари',
webpage, 'comment count', fatal=False))
- formats = []
+ # Sets some cookies
+ self._download_xml(
+ 'http://www.24video.net/video/xml/%s?mode=init' % video_id,
+ video_id, 'Downloading init XML')
- pc_video = self._download_xml(
+ video_xml = self._download_xml(
'http://www.24video.net/video/xml/%s?mode=play' % video_id,
- video_id, 'Downloading PC video URL').find('.//video')
+ video_id, 'Downloading video XML')
- formats.append({
- 'url': pc_video.attrib['url'],
- 'format_id': 'pc',
- 'quality': 1,
- })
+ video = xpath_element(video_xml, './/video', 'video', fatal=True)
- like_count = int_or_none(pc_video.get('ratingPlus'))
- dislike_count = int_or_none(pc_video.get('ratingMinus'))
- age_limit = 18 if pc_video.get('adult') == 'true' else 0
+ formats = [{
+ 'url': xpath_attr(video, '', 'url', 'video URL', fatal=True),
+ }]
- mobile_video = self._download_xml(
- 'http://www.24video.net/video/xml/%s' % video_id,
- video_id, 'Downloading mobile video URL').find('.//video')
-
- formats.append({
- 'url': mobile_video.attrib['url'],
- 'format_id': 'mobile',
- 'quality': 0,
- })
-
- self._sort_formats(formats)
+ like_count = int_or_none(video.get('ratingPlus'))
+ dislike_count = int_or_none(video.get('ratingMinus'))
+ age_limit = 18 if video.get('adult') == 'true' else 0
return {
'id': video_id,
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import remove_end
+
+
+class TwentyMinutenIE(InfoExtractor):
+ IE_NAME = '20min'
+ _VALID_URL = r'https?://(?:www\.)?20min\.ch/(?:videotv/*\?.*\bvid=(?P<id>\d+)|(?:[^/]+/)*(?P<display_id>[^/#?]+))'
+ _TESTS = [{
+ # regular video
+ 'url': 'http://www.20min.ch/videotv/?vid=469148&cid=2',
+ 'md5': 'b52d6bc6ea6398e6a38f12cfd418149c',
+ 'info_dict': {
+ 'id': '469148',
+ 'ext': 'flv',
+ 'title': '85 000 Franken für 15 perfekte Minuten',
+ 'description': 'Was die Besucher vom Silvesterzauber erwarten können. (Video: Alice Grosjean/Murat Temel)',
+ 'thumbnail': 'http://thumbnails.20min-tv.ch/server063/469148/frame-72-469148.jpg'
+ }
+ }, {
+ # news article with video
+ 'url': 'http://www.20min.ch/schweiz/news/story/-Wir-muessen-mutig-nach-vorne-schauen--22050469',
+ 'md5': 'cd4cbb99b94130cff423e967cd275e5e',
+ 'info_dict': {
+ 'id': '469408',
+ 'display_id': '-Wir-muessen-mutig-nach-vorne-schauen--22050469',
+ 'ext': 'flv',
+ 'title': '«Wir müssen mutig nach vorne schauen»',
+ 'description': 'Kein Land sei innovativer als die Schweiz, sagte Johann Schneider-Ammann in seiner Neujahrsansprache. Das Land müsse aber seine Hausaufgaben machen.',
+ 'thumbnail': 'http://www.20min.ch/images/content/2/2/0/22050469/10/teaserbreit.jpg'
+ }
+ }, {
+ 'url': 'http://www.20min.ch/videotv/?cid=44&vid=468738',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.20min.ch/ro/sortir/cinema/story/Grandir-au-bahut--c-est-dur-18927411',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id') or video_id
+
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._html_search_regex(
+ r'<h1>.*?<span>(.+?)</span></h1>',
+ webpage, 'title', default=None)
+ if not title:
+ title = remove_end(re.sub(
+ r'^20 [Mm]inuten.*? -', '', self._og_search_title(webpage)), ' - News')
+
+ if not video_id:
+ video_id = self._search_regex(
+ r'"file\d?"\s*,\s*\"(\d+)', webpage, 'video id')
+
+ description = self._html_search_meta(
+ 'description', webpage, 'description')
+ thumbnail = self._og_search_thumbnail(webpage)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': 'http://speed.20min-tv.ch/%sm.flv' % video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ }
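The title fallback above strips the site's branding from og:title via re.sub plus remove_end. A quick sketch of that cleanup on an assumed og:title value (remove_end mirrored from utils):

import re

def remove_end(s, end):
    # Mirrors youtube_dl.utils.remove_end: drop a suffix when present
    return s[:-len(end)] if s and s.endswith(end) else s

# Assumed og:title value in the site's usual branding pattern
og_title = '20 Minuten - «Wir müssen mutig nach vorne schauen» - News'
title = remove_end(re.sub(r'^20 [Mm]inuten.*? -', '', og_title), ' - News').strip()
print(title)  # «Wir müssen mutig nach vorne schauen»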
)
-class TwitterCardIE(InfoExtractor):
+class TwitterBaseIE(InfoExtractor):
+ def _get_vmap_video_url(self, vmap_url, video_id):
+ vmap_data = self._download_xml(vmap_url, video_id)
+ return xpath_text(vmap_data, './/MediaFile').strip()
+
+
+class TwitterCardIE(TwitterBaseIE):
IE_NAME = 'twitter:card'
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
- 'md5': '4fa26a35f9d1bf4b646590ba8e84be19',
+ # MD5 checksums are different in different places
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
- 'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814',
+ 'md5': 'd4724ffe6d2437886d004fa5de1043b3',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'ext': 'mp4',
'upload_date': '20151113',
'uploader_id': '1189339351084113920',
- 'uploader': '@ArsenalTerje',
- 'title': 'Vine by @ArsenalTerje',
+ 'uploader': 'ArsenalTerje',
+ 'title': 'Vine by ArsenalTerje',
},
'add_ie': ['Vine'],
}
video_id)
if 'playlist' not in config:
if 'vmapUrl' in config:
- vmap_data = self._download_xml(config['vmapUrl'], video_id)
- video_url = xpath_text(vmap_data, './/MediaFile').strip()
formats.append({
- 'url': video_url,
+ 'url': self._get_vmap_video_url(config['vmapUrl'], video_id),
})
break # same video regardless of UA
continue
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
- 'md5': 'db6612ec5d03355953c3ca9250c97e5e',
+ # MD5 checksums are different in different places
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
+ 'expected_warnings': ['height', 'width'],
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'md5': '39b7199856dee6cd4432e72c74bc69d4',
return info
mobj = re.search(r'''(?x)
- <video[^>]+class="animated-gif"[^>]+
- (?:data-height="(?P<height>\d+)")?[^>]+
- (?:data-width="(?P<width>\d+)")?[^>]+
- (?:poster="(?P<poster>[^"]+)")?[^>]*>\s*
+ <video[^>]+class="animated-gif"(?P<more_info>[^>]+)>\s*
<source[^>]+video-src="(?P<url>[^"]+)"
''', webpage)
if mobj:
+ more_info = mobj.group('more_info')
+ height = int_or_none(self._search_regex(
+ r'data-height="(\d+)"', more_info, 'height', fatal=False))
+ width = int_or_none(self._search_regex(
+ r'data-width="(\d+)"', more_info, 'width', fatal=False))
+ thumbnail = self._search_regex(
+ r'poster="([^"]+)"', more_info, 'poster', fatal=False)
info.update({
'id': twid,
'url': mobj.group('url'),
- 'height': int_or_none(mobj.group('height')),
- 'width': int_or_none(mobj.group('width')),
- 'thumbnail': mobj.group('poster'),
+ 'height': height,
+ 'width': width,
+ 'thumbnail': thumbnail,
})
return info
- raise ExtractorError('There\'s not video in this tweet.')
+ raise ExtractorError('There\'s no video in this tweet.')
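The animated-gif rewrite above stops assuming data-height, data-width and poster appear in a fixed order inside the <video> tag; capturing the whole attribute blob and searching it per attribute is order-independent. A quick sketch (sample tag assumed):

import re

# Hypothetical tag: attributes deliberately out of the old regex's order.
tag = '<video class="animated-gif" poster="http://e.com/p.jpg" data-width="360" data-height="640">'
more_info = re.search(
    r'<video[^>]+class="animated-gif"(?P<more_info>[^>]+)>', tag).group('more_info')

height = re.search(r'data-height="(\d+)"', more_info)
width = re.search(r'data-width="(\d+)"', more_info)
poster = re.search(r'poster="([^"]+)"', more_info)
print(int(height.group(1)), int(width.group(1)), poster.group(1))
# 640 360 http://e.com/p.jpg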
+
+
+class TwitterAmplifyIE(TwitterBaseIE):
+ IE_NAME = 'twitter:amplify'
+ _VALID_URL = r'https?://amp\.twimg\.com/v/(?P<id>[0-9a-f\-]{36})'
+
+ _TEST = {
+ 'url': 'https://amp.twimg.com/v/0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
+ 'md5': '7df102d0b9fd7066b86f3159f8e81bf6',
+ 'info_dict': {
+ 'id': '0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
+ 'ext': 'mp4',
+ 'title': 'Twitter Video',
+ 'thumbnail': 're:^https?://.*',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ vmap_url = self._html_search_meta(
+ 'twitter:amplify:vmap', webpage, 'vmap url')
+ video_url = self._get_vmap_video_url(vmap_url, video_id)
+
+ thumbnails = []
+ thumbnail = self._html_search_meta(
+ 'twitter:image:src', webpage, 'thumbnail', fatal=False)
+
+ def _find_dimension(target):
+ w = int_or_none(self._html_search_meta(
+ 'twitter:%s:width' % target, webpage, fatal=False))
+ h = int_or_none(self._html_search_meta(
+ 'twitter:%s:height' % target, webpage, fatal=False))
+ return w, h
+
+ if thumbnail:
+ thumbnail_w, thumbnail_h = _find_dimension('image')
+ thumbnails.append({
+ 'url': thumbnail,
+ 'width': thumbnail_w,
+ 'height': thumbnail_h,
+ })
+
+ video_w, video_h = _find_dimension('player')
+ formats = [{
+ 'url': video_url,
+ 'width': video_w,
+ 'height': video_h,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': 'Twitter Video',
+ 'formats': formats,
+ 'thumbnails': thumbnails,
+ }
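_get_vmap_video_url pulls the first MediaFile entry out of a VMAP document. A self-contained sketch against a trimmed, hypothetical payload:

import xml.etree.ElementTree as ET

# Trimmed, hypothetical VMAP payload; real responses carry more metadata.
VMAP = '''<vmap>
  <MediaFiles>
    <MediaFile>
      https://video.twimg.com/amplify_video/sample/vid/720x1280/sample.mp4
    </MediaFile>
  </MediaFiles>
</vmap>'''

def get_vmap_video_url(vmap_xml):
    # Same idea as xpath_text(vmap_data, './/MediaFile').strip() above
    return ET.fromstring(vmap_xml).find('.//MediaFile').text.strip()

print(get_vmap_video_url(VMAP))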
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..compat import (
+ compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
+ float_or_none,
+ int_or_none,
sanitized_Request,
+ unescapeHTML,
)
'skip': 'Requires udemy account credentials',
}]
+ def _enroll_course(self, webpage, course_id):
+ checkout_url = unescapeHTML(self._search_regex(
+ r'href=(["\'])(?P<url>https?://(?:www\.)?udemy\.com/payment/checkout/.+?)\1',
+ webpage, 'checkout url', group='url', default=None))
+ if checkout_url:
+ raise ExtractorError(
+ 'Course %s is not free. You have to pay for it before you can download. '
+ 'Use this URL to confirm purchase: %s' % (course_id, checkout_url), expected=True)
+
+ enroll_url = unescapeHTML(self._search_regex(
+ r'href=(["\'])(?P<url>https?://(?:www\.)?udemy\.com/course/subscribe/.+?)\1',
+ webpage, 'enroll url', group='url', default=None))
+ if enroll_url:
+ webpage = self._download_webpage(enroll_url, course_id, 'Enrolling in the course')
+ if '>You have enrolled in' in webpage:
+ self.to_screen('%s: Successfully enrolled in the course' % course_id)
+
+ def _download_lecture(self, course_id, lecture_id):
+ return self._download_json(
+ 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % (
+ course_id, lecture_id, compat_urllib_parse.urlencode({
+ 'video_only': '',
+ 'auto_play': '',
+ 'fields[lecture]': 'title,description,asset',
+ 'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
+ 'instructorPreviewMode': 'False',
+ })),
+ lecture_id, 'Downloading lecture JSON')
+
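The lecture call builds a sparse-fields query so the api-2.0 endpoint returns only what the extractor needs. A sketch of the query construction (endpoint and field names taken from the hunk above; the printed URL is illustrative only):

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2, matching compat_urllib_parse

query = urlencode({
    'video_only': '',
    'auto_play': '',
    'fields[lecture]': 'title,description,asset',
    'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
    'instructorPreviewMode': 'False',
})
# Parameter order may vary across Python versions; IDs are placeholders.
print('https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s'
      % ('COURSE_ID', 'LECTURE_ID', query))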
def _handle_error(self, response):
if not isinstance(response, dict):
return
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
+ headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- self.raise_login_required('Udemy account is required')
+ return
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
def _real_extract(self, url):
lecture_id = self._match_id(url)
- lecture = self._download_json(
- 'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
- lecture_id, 'Downloading lecture JSON')
+ webpage = self._download_webpage(url, lecture_id)
- asset_type = lecture.get('assetType') or lecture.get('asset_type')
+ course_id = self._search_regex(
+ r'data-course-id=["\'](\d+)', webpage, 'course id')
+
+ try:
+ lecture = self._download_lecture(course_id, lecture_id)
+ except ExtractorError as e:
+ # Error could possibly mean we are not enrolled in the course
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+ self._enroll_course(webpage, course_id)
+ lecture = self._download_lecture(course_id, lecture_id)
+ else:
+ raise
+
+ title = lecture['title']
+ description = lecture.get('description')
+
+ asset = lecture['asset']
+
+ asset_type = asset.get('assetType') or asset.get('asset_type')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
- asset = lecture['asset']
-
stream_url = asset.get('streamUrl') or asset.get('stream_url')
- mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
- if mobj:
- return self.url_result(mobj.group(1), 'Youtube')
+ if stream_url:
+ youtube_url = self._search_regex(
+ r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
+ if youtube_url:
+ return self.url_result(youtube_url, 'Youtube')
video_id = asset['id']
thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
- duration = asset['data']['duration']
-
- download_url = asset.get('downloadUrl') or asset.get('download_url')
-
- video = download_url.get('Video') or download_url.get('video')
- video_480p = download_url.get('Video480p') or download_url.get('video_480p')
-
- formats = [
- {
- 'url': video_480p[0],
- 'format_id': '360p',
- },
- {
- 'url': video[0],
- 'format_id': '720p',
- },
- ]
-
- title = lecture['title']
- description = lecture['description']
+ duration = float_or_none(asset.get('data', {}).get('duration'))
+ outputs = asset.get('data', {}).get('outputs', {})
+
+ formats = []
+ for format_ in asset.get('download_urls', {}).get('Video', []):
+ video_url = format_.get('file')
+ if not video_url:
+ continue
+ format_id = format_.get('label')
+ f = {
+ 'url': format_['file'],
+ 'height': int_or_none(format_id),
+ }
+ if format_id:
+ # Some videos contain additional metadata (e.g.
+ # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
+ output = outputs.get(format_id)
+ if isinstance(output, dict):
+ f.update({
+ 'format_id': '%sp' % (output.get('label') or format_id),
+ 'width': int_or_none(output.get('width')),
+ 'height': int_or_none(output.get('height')),
+ 'vbr': int_or_none(output.get('video_bitrate_in_kbps')),
+ 'vcodec': output.get('video_codec'),
+ 'fps': int_or_none(output.get('frame_rate')),
+ 'abr': int_or_none(output.get('audio_bitrate_in_kbps')),
+ 'acodec': output.get('audio_codec'),
+ 'asr': int_or_none(output.get('audio_sample_rate')),
+ 'tbr': int_or_none(output.get('total_bitrate_in_kbps')),
+ 'filesize': int_or_none(output.get('file_size_in_bytes')),
+ })
+ else:
+ f['format_id'] = '%sp' % format_id
+ formats.append(f)
+
+ self._sort_formats(formats)
return {
'id': video_id,
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
- _VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)'
- _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<'
- _ALREADY_ENROLLED = '>You are already taking this course.<'
+ _VALID_URL = r'https?://www\.udemy\.com/(?P<id>[\da-z-]+)'
_TESTS = []
@classmethod
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- course_path = mobj.group('coursepath')
+ course_path = self._match_id(url)
+
+ webpage = self._download_webpage(url, course_path)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s' % course_path,
course_path, 'Downloading course JSON')
- course_id = int(response['id'])
- course_title = response['title']
-
- webpage = self._download_webpage(
- 'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
- course_id, 'Enrolling in the course')
+ course_id = response['id']
+ course_title = response.get('title')
- if self._SUCCESSFULLY_ENROLLED in webpage:
- self.to_screen('%s: Successfully enrolled in' % course_id)
- elif self._ALREADY_ENROLLED in webpage:
- self.to_screen('%s: Already enrolled in' % course_id)
+ self._enroll_course(webpage, course_id)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
course_id, 'Downloading course curriculum')
- entries = [
- self.url_result(
- 'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']), 'Udemy')
- for asset in response if asset.get('assetType') or asset.get('asset_type') == 'Video'
- ]
+ entries = []
+ chapter, chapter_number = None, None
+ for asset in response:
+ asset_type = asset.get('assetType') or asset.get('asset_type')
+ if asset_type == 'Video':
+ asset_id = asset.get('id')
+ if asset_id:
+ entry = {
+ '_type': 'url_transparent',
+ 'url': 'https://www.udemy.com/%s/#/lecture/%s' % (course_path, asset['id']),
+ 'ie_key': UdemyIE.ie_key(),
+ }
+ if chapter_number:
+ entry['chapter_number'] = chapter_number
+ if chapter:
+ entry['chapter'] = chapter
+ entries.append(entry)
+ elif asset.get('type') == 'chapter':
+ chapter_number = asset.get('index') or asset.get('object_index')
+ chapter = asset.get('title')
return self.playlist_result(entries, course_id, course_title)
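A minimal sketch of the chapter-threading loop above, run on toy curriculum data (field names follow the code; the payload itself is made up):

    curriculum = [
        {'type': 'chapter', 'object_index': 1, 'title': 'Getting started'},
        {'asset_type': 'Video', 'id': 101},
        {'type': 'chapter', 'object_index': 2, 'title': 'Going deeper'},
        {'asset_type': 'Video', 'id': 201},
    ]
    entries, chapter, chapter_number = [], None, None
    for item in curriculum:
        if (item.get('assetType') or item.get('asset_type')) == 'Video':
            entries.append({
                'id': item['id'],
                'chapter': chapter,
                'chapter_number': chapter_number,
            })
        elif item.get('type') == 'chapter':
            chapter_number = item.get('index') or item.get('object_index')
            chapter = item.get('title')
    assert entries[1] == {'id': 201, 'chapter': 'Going deeper', 'chapter_number': 2}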
+++ /dev/null
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlparse
-from ..utils import (
- ExtractorError,
- qualities,
- unified_strdate,
- clean_html,
-)
-
-
-class UltimediaIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?ultimedia\.com/default/index/video[^/]+/id/(?P<id>[\d+a-z]+)'
- _TESTS = [{
- # news
- 'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',
- 'md5': '276a0e49de58c7e85d32b057837952a2',
- 'info_dict': {
- 'id': 's8uk0r',
- 'ext': 'mp4',
- 'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées',
- 'description': 'md5:3e5c8fd65791487333dda5db8aed32af',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'upload_date': '20150317',
- },
- }, {
- # music
- 'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8',
- 'md5': '2ea3513813cf230605c7e2ffe7eca61c',
- 'info_dict': {
- 'id': 'xvpfp8',
- 'ext': 'mp4',
- 'title': "Two - C'est la vie (Clip)",
- 'description': 'Two',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'upload_date': '20150224',
- },
- }]
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- deliver_url = self._proto_relative_url(self._search_regex(
- r'<iframe[^>]+src="((?:https?:)?//(?:www\.)?ultimedia\.com/deliver/[^"]+)"',
- webpage, 'deliver URL'), compat_urllib_parse_urlparse(url).scheme + ':')
-
- deliver_page = self._download_webpage(
- deliver_url, video_id, 'Downloading iframe page')
-
- if '>This video is currently not available' in deliver_page:
- raise ExtractorError(
- 'Video %s is currently not available' % video_id, expected=True)
-
- player = self._parse_json(
- self._search_regex(
- r"jwplayer\('player(?:_temp)?'\)\.setup\(({.+?})\)\.on",
- deliver_page, 'player'),
- video_id)
-
- quality = qualities(['flash', 'html5'])
- formats = []
- for mode in player['modes']:
- video_url = mode.get('config', {}).get('file')
- if not video_url:
- continue
- if re.match(r'https?://www\.youtube\.com/.+?', video_url):
- return self.url_result(video_url, 'Youtube')
- formats.append({
- 'url': video_url,
- 'format_id': mode.get('type'),
- 'quality': quality(mode.get('type')),
- })
- self._sort_formats(formats)
-
- thumbnail = player.get('image')
-
- title = clean_html((
- self._html_search_regex(
- r'(?s)<div\s+id="catArticle">.+?</div>(.+?)</h1>',
- webpage, 'title', default=None) or
- self._search_regex(
- r"var\s+nameVideo\s*=\s*'([^']+)'",
- deliver_page, 'title')))
-
- description = clean_html(self._html_search_regex(
- r'(?s)<span>Description</span>(.+?)</p>', webpage,
- 'description', fatal=False))
-
- upload_date = unified_strdate(self._search_regex(
- r'Ajouté le\s*<span>([^<]+)', webpage,
- 'upload date', fatal=False))
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'formats': formats,
- }
webpage = self._download_webpage(url, video_id)
- files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
+ files = set(re.findall(r'file\s*:\s*"(/[^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
formats = []
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
- # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
+ # some sites use this embed format (see: https://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
video_id = m.group('id')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
title = self._html_search_regex(r'<title>(.*)</title>',
webpage, 'title').split('/')[0].strip()
- info_url = "http://vbox7.com/play/magare.do"
+ info_url = 'http://vbox7.com/play/magare.do'
data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
info_request = sanitized_Request(info_url, data)
info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
ExtractorError,
int_or_none,
sanitized_Request,
+ parse_iso8601,
)
class VevoIE(InfoExtractor):
- """
+ '''
Accepts urls from vevo.com or in the format 'vevo:{id}'
(currently used by MTVIE and MySpaceIE)
- """
+ '''
_VALID_URL = r'''(?x)
(?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
https?://cache\.vevo\.com/m/html/embed\.html\?video=|
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
- "md5": "95ee28ee45e70130e3ab02b0f579ae23",
+ 'md5': '95ee28ee45e70130e3ab02b0f579ae23',
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
- "upload_date": "20130624",
- "uploader": "Hurts",
- "title": "Somebody to Die For",
- "duration": 230.12,
- "width": 1920,
- "height": 1080,
- # timestamp and upload_date are often incorrect; seem to change randomly
- 'timestamp': int,
- }
+ 'title': 'Somebody to Die For',
+ 'upload_date': '20130624',
+ 'uploader': 'Hurts',
+ 'timestamp': 1372057200,
+ },
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
+ 'title': 'I Wish I Could Break Your Heart',
'upload_date': '20140219',
'uploader': 'Cassadee Pope',
- 'title': 'I Wish I Could Break Your Heart',
- 'duration': 226.101,
- 'age_limit': 0,
- 'timestamp': int,
- }
+ 'timestamp': 1392796919,
+ },
}, {
'note': 'Age-limited video',
'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
'info_dict': {
'id': 'USRV81300282',
'ext': 'mp4',
- 'age_limit': 18,
'title': 'Tunnel Vision (Explicit)',
+ 'upload_date': '20130703',
+ 'age_limit': 18,
'uploader': 'Justin Timberlake',
- 'upload_date': 're:2013070[34]',
- 'timestamp': int,
+ 'timestamp': 1372888800,
+ },
+ }, {
+ 'note': 'No video_info',
+ 'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
+ 'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
+ 'info_dict': {
+ 'id': 'USUV71503000',
+ 'ext': 'mp4',
+ 'title': 'Till I Die',
+ 'upload_date': '20151207',
+ 'age_limit': 18,
+ 'uploader': 'K Camp',
+ 'timestamp': 1449468000,
},
- 'params': {
- 'skip_download': 'true',
- }
}]
- _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
-
- def _real_initialize(self):
- req = sanitized_Request(
- 'http://www.vevo.com/auth', data=b'')
- webpage = self._download_webpage(
- req, None,
- note='Retrieving oauth token',
- errnote='Unable to retrieve oauth token',
- fatal=False)
- if webpage is False:
- self._oauth_token = None
- else:
- self._oauth_token = self._search_regex(
- r'access_token":\s*"([^"]+)"',
- webpage, 'access token', fatal=False)
-
- def _formats_from_json(self, video_info):
- last_version = {'version': -1}
- for version in video_info['videoVersions']:
- # These are the HTTP downloads, other types are for different manifests
- if version['sourceType'] == 2:
- if version['version'] > last_version['version']:
- last_version = version
- if last_version['version'] == -1:
- raise ExtractorError('Unable to extract last version of the video')
-
- renditions = compat_etree_fromstring(last_version['data'])
- formats = []
- # Already sorted from worst to best quality
- for rend in renditions.findall('rendition'):
- attr = rend.attrib
- format_note = '%(videoCodec)s@%(videoBitrate)4sk, %(audioCodec)s@%(audioBitrate)3sk' % attr
- formats.append({
- 'url': attr['url'],
- 'format_id': attr['name'],
- 'format_note': format_note,
- 'height': int(attr['frameheight']),
- 'width': int(attr['frameWidth']),
- })
- return formats
+ _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com'
+ _SOURCE_TYPES = {
+ 0: 'youtube',
+ 1: 'brightcove',
+ 2: 'http',
+ 3: 'hls_ios',
+ 4: 'hls',
+ 5: 'smil', # http
+ 7: 'f4m_cc',
+ 8: 'f4m_ak',
+ 9: 'f4m_l3',
+ 10: 'ism',
+ 13: 'smil', # rtmp
+ 18: 'dash',
+ }
+ _VERSIONS = {
+ 0: 'youtube', # only in AuthenticateVideo videoVersions
+ 1: 'level3',
+ 2: 'akamai',
+ 3: 'level3',
+ 4: 'amazon',
+ }
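As a hedged illustration of how the two tables combine (the video_version dict below is hypothetical, but follows the shape consumed in _real_extract):

    video_version = {'sourceType': 2, 'version': 3}  # hypothetical API values
    source_type = VevoIE._SOURCE_TYPES.get(video_version['sourceType'])  # -> 'http'
    cdn = VevoIE._VERSIONS.get(video_version['version'])                 # -> 'level3'
    # format_ids below are then derived from these names, e.g. 'http-level3-High'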
- def _formats_from_smil(self, smil_xml):
+ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
formats = []
- smil_doc = compat_etree_fromstring(smil_xml.encode('utf-8'))
- els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
+ els = smil.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
for el in els:
src = el.attrib['src']
m = re.match(r'''(?xi)
(?P<ext>[a-z0-9]+):
(?P<path>
[/a-z0-9]+ # The directory and main part of the URL
- _(?P<cbr>[0-9]+)k
+ _(?P<tbr>[0-9]+)k
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
format_url = self._SMIL_BASE_URL + m.group('path')
formats.append({
'url': format_url,
- 'format_id': 'SMIL_' + m.group('cbr'),
+ 'format_id': 'smil_' + m.group('tbr'),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
+ 'tbr': int(m.group('tbr')),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
})
return formats
- def _download_api_formats(self, video_id):
- if not self._oauth_token:
- self._downloader.report_warning(
- 'No oauth token available, skipping API HLS download')
- return []
-
- api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
- video_id, self._oauth_token)
- api_data = self._download_json(
- api_url, video_id,
- note='Downloading HLS formats',
- errnote='Failed to download HLS format list', fatal=False)
- if api_data is None:
- return []
-
- m3u8_url = api_data[0]['url']
- return self._extract_m3u8_formats(
- m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
- preference=0)
+ def _initialize_api(self, video_id):
+ req = sanitized_Request(
+ 'http://www.vevo.com/auth', data=b'')
+ webpage = self._download_webpage(
+ req, None,
+ note='Retrieving oauth token',
+ errnote='Unable to retrieve oauth token')
+
+ if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
+ raise ExtractorError(
+ '%s said: This page is currently unavailable in your region.' % self.IE_NAME, expected=True)
+
+ auth_info = self._parse_json(webpage, video_id)
+ self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['access_token']
+
+    def _call_api(self, path, video_id, note, errnote, fatal=True):
+        return self._download_json(
+            self._api_url_template % path, video_id, note, errnote,
+            fatal=fatal)
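For clarity, a sketch of what the template built in _initialize_api expands to (the token value is made up):

    api_url_template = 'http://apiv2.vevo.com/%s?token=' + 'EXAMPLE_TOKEN'
    streams_url = api_url_template % ('video/%s/streams' % 'USUV71503000')
    # -> 'http://apiv2.vevo.com/video/USUV71503000/streams?token=EXAMPLE_TOKEN'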
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
- response = self._download_json(json_url, video_id)
- video_info = response['video']
+ response = self._download_json(
+ json_url, video_id, 'Downloading video info', 'Unable to download info')
+ video_info = response.get('video') or {}
+ video_versions = video_info.get('videoVersions')
+ uploader = None
+ timestamp = None
+ view_count = None
+ formats = []
if not video_info:
- if 'statusMessage' in response:
- raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True)
- raise ExtractorError('Unable to extract videos')
+ if response.get('statusCode') != 909:
+ ytid = response.get('errorInfo', {}).get('ytid')
+ if ytid:
+ self.report_warning(
+ 'Video is geoblocked, trying with the YouTube video %s' % ytid)
+ return self.url_result(ytid, 'Youtube', ytid)
+
+ if 'statusMessage' in response:
+ raise ExtractorError('%s said: %s' % (
+ self.IE_NAME, response['statusMessage']), expected=True)
+ raise ExtractorError('Unable to extract videos')
- formats = self._formats_from_json(video_info)
+ self._initialize_api(video_id)
+ video_info = self._call_api(
+ 'video/%s' % video_id, video_id, 'Downloading api video info',
+ 'Failed to download video info')
+
+ video_versions = self._call_api(
+ 'video/%s/streams' % video_id, video_id,
+ 'Downloading video versions info',
+ 'Failed to download video versions info')
+
+ timestamp = parse_iso8601(video_info.get('releaseDate'))
+ artists = video_info.get('artists')
+ if artists:
+ uploader = artists[0]['name']
+ view_count = int_or_none(video_info.get('views', {}).get('total'))
+
+ for video_version in video_versions:
+ version = self._VERSIONS.get(video_version['version'])
+ version_url = video_version.get('url')
+ if not version_url:
+ continue
+
+ if '.ism' in version_url:
+ continue
+ elif '.mpd' in version_url:
+ formats.extend(self._extract_mpd_formats(
+ version_url, video_id, mpd_id='dash-%s' % version,
+ note='Downloading %s MPD information' % version,
+ errnote='Failed to download %s MPD information' % version,
+ fatal=False))
+ elif '.m3u8' in version_url:
+ formats.extend(self._extract_m3u8_formats(
+ version_url, video_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls-%s' % version,
+ note='Downloading %s m3u8 information' % version,
+ errnote='Failed to download %s m3u8 information' % version,
+ fatal=False))
+ else:
+ m = re.search(r'''(?xi)
+ _(?P<width>[0-9]+)x(?P<height>[0-9]+)
+ _(?P<vcodec>[a-z0-9]+)
+ _(?P<vbr>[0-9]+)
+ _(?P<acodec>[a-z0-9]+)
+ _(?P<abr>[0-9]+)
+ \.(?P<ext>[a-z0-9]+)''', version_url)
+ if not m:
+ continue
+
+ formats.append({
+ 'url': version_url,
+ 'format_id': 'http-%s-%s' % (version, video_version['quality']),
+ 'vcodec': m.group('vcodec'),
+ 'acodec': m.group('acodec'),
+ 'vbr': int(m.group('vbr')),
+ 'abr': int(m.group('abr')),
+ 'ext': m.group('ext'),
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ else:
+ timestamp = int_or_none(self._search_regex(
+ r'/Date\((\d+)\)/',
+ video_info['releaseDate'], 'release date', fatal=False),
+ scale=1000)
+ artists = video_info.get('mainArtists')
+ if artists:
+ uploader = artists[0]['artistName']
+
+ smil_parsed = False
+ for video_version in video_info['videoVersions']:
+ version = self._VERSIONS.get(video_version['version'])
+ if version == 'youtube':
+ continue
+ else:
+ source_type = self._SOURCE_TYPES.get(video_version['sourceType'])
+ renditions = compat_etree_fromstring(video_version['data'])
+ if source_type == 'http':
+ for rend in renditions.findall('rendition'):
+ attr = rend.attrib
+ formats.append({
+ 'url': attr['url'],
+ 'format_id': 'http-%s-%s' % (version, attr['name']),
+ 'height': int_or_none(attr.get('frameheight')),
+ 'width': int_or_none(attr.get('frameWidth')),
+ 'tbr': int_or_none(attr.get('totalBitrate')),
+ 'vbr': int_or_none(attr.get('videoBitrate')),
+ 'abr': int_or_none(attr.get('audioBitrate')),
+ 'vcodec': attr.get('videoCodec'),
+ 'acodec': attr.get('audioCodec'),
+ })
+ elif source_type == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ renditions.find('rendition').attrib['url'], video_id,
+ 'mp4', 'm3u8_native', m3u8_id='hls-%s' % version,
+ note='Downloading %s m3u8 information' % version,
+ errnote='Failed to download %s m3u8 information' % version,
+ fatal=False))
+ elif source_type == 'smil' and version == 'level3' and not smil_parsed:
+ formats.extend(self._extract_smil_formats(
+ renditions.find('rendition').attrib['url'], video_id, False))
+ smil_parsed = True
+ self._sort_formats(formats)
+
+ title = video_info['title']
is_explicit = video_info.get('isExplicit')
if is_explicit is True:
    age_limit = 18
elif is_explicit is False:
    age_limit = 0
else:
age_limit = None
- # Download via HLS API
- formats.extend(self._download_api_formats(video_id))
-
- # Download SMIL
- smil_blocks = sorted((
- f for f in video_info['videoVersions']
- if f['sourceType'] == 13),
- key=lambda f: f['version'])
- smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
- self._SMIL_BASE_URL, video_id, video_id.lower())
- if smil_blocks:
- smil_url_m = self._search_regex(
- r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
- default=None)
- if smil_url_m is not None:
- smil_url = smil_url_m
- if smil_url:
- smil_xml = self._download_webpage(
- smil_url, video_id, 'Downloading SMIL info', fatal=False)
- if smil_xml:
- formats.extend(self._formats_from_smil(smil_xml))
-
- self._sort_formats(formats)
- timestamp_ms = int_or_none(self._search_regex(
- r'/Date\((\d+)\)/',
- video_info['launchDate'], 'launch date', fatal=False))
+ duration = video_info.get('duration')
return {
'id': video_id,
- 'title': video_info['title'],
+ 'title': title,
'formats': formats,
- 'thumbnail': video_info['imageUrl'],
- 'timestamp': timestamp_ms // 1000,
- 'uploader': video_info['mainArtists'][0]['artistName'],
- 'duration': video_info['duration'],
+ 'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'duration': duration,
+ 'view_count': view_count,
'age_limit': age_limit,
}
import re
from .common import InfoExtractor
+from .xstream import XstreamIE
from ..utils import (
ExtractorError,
float_or_none,
)
-class VGTVIE(InfoExtractor):
- IE_DESC = 'VGTV and BTTV'
+class VGTVIE(XstreamIE):
+ IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet'
+
+ _HOST_TO_APPNAME = {
+ 'vgtv.no': 'vgtv',
+ 'bt.no/tv': 'bttv',
+ 'aftenbladet.no/tv': 'satv',
+ 'fvn.no/fvntv': 'fvntv',
+ 'aftenposten.no/webtv': 'aptv',
+ }
+
+ _APP_NAME_TO_VENDOR = {
+ 'vgtv': 'vgtv',
+ 'bttv': 'bt',
+ 'satv': 'sa',
+ 'fvntv': 'fvn',
+ 'aptv': 'ap',
+ }
+
_VALID_URL = r'''(?x)
- (?:
- vgtv:|
- http://(?:www\.)?
+ (?:https?://(?:www\.)?
+ (?P<host>
+ %s
)
- (?P<host>vgtv|bt)
+ /
(?:
- :|
- \.no/(?:tv/)?\#!/(?:video|live)/
- )
- (?P<id>[0-9]+)
- '''
+ \#!/(?:video|live)/|
+                        embed\?.*id=
+ )|
+ (?P<appname>
+ %s
+ ):)
+ (?P<id>\d+)
+ ''' % ('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys()))
+
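A quick illustrative check that the composed pattern accepts both the website URLs and the internal '<appname>:<id>' shorthand (IDs made up):

    import re

    pattern = re.compile(VGTVIE._VALID_URL)
    m = pattern.match('http://www.vgtv.no/#!/video/84196/some-slug')
    assert m and m.group('host') == 'vgtv.no' and m.group('id') == '84196'
    m = pattern.match('bttv:123456')
    assert m and m.group('appname') == 'bttv' and m.group('id') == '123456'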
_TESTS = [
{
# streamType: vod
# m3u8 download
'skip_download': True,
},
+ 'skip': 'Video is no longer available',
},
{
- # streamType: live
+ # streamType: wasLive
'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla',
'info_dict': {
'id': '113063',
- 'ext': 'flv',
- 'title': 're:^DIREKTE: V75 fra Solvalla [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'ext': 'mp4',
+ 'title': 'V75 fra Solvalla 30.05.15',
'description': 'md5:b3743425765355855f88e096acc93231',
'thumbnail': 're:^https?://.*\.jpg',
- 'duration': 0,
+ 'duration': 25966,
'timestamp': 1432975582,
'upload_date': '20150530',
'view_count': int,
'skip_download': True,
},
},
+ {
+ 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
+ 'md5': 'fd828cd29774a729bf4d4425fe192972',
+ 'info_dict': {
+ 'id': '21039',
+ 'ext': 'mov',
+ 'title': 'TRAILER: «SWEATSHOP» - I can´t take any more',
+ 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238',
+ 'duration': 66,
+ 'timestamp': 1417002452,
+ 'upload_date': '20141126',
+ 'view_count': int,
+ }
+ },
{
'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien',
'only_matching': True,
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
-
- HOST_WEBSITES = {
- 'vgtv': 'vgtv',
- 'bt': 'bttv',
- }
+ appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname')
+ vendor = self._APP_NAME_TO_VENDOR[appname]
data = self._download_json(
'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website'
- % (host, video_id, HOST_WEBSITES[host]),
+ % (vendor, video_id, appname),
video_id, 'Downloading media JSON')
if data.get('status') == 'inactive':
raise ExtractorError(
'Video %s is no longer available' % video_id, expected=True)
+ info = {
+ 'formats': [],
+ }
+ if len(video_id) == 5:
+ if appname == 'bttv':
+ info = self._extract_video_info('btno', video_id)
+ elif appname == 'aptv':
+ info = self._extract_video_info('ap', video_id)
+
streams = data['streamUrls']
stream_type = data.get('streamType')
hls_url = streams.get('hls')
if hls_url:
formats.extend(self._extract_m3u8_formats(
- hls_url, video_id, 'mp4', m3u8_id='hls'))
+ hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
hds_url = streams.get('hds')
- # wasLive hds are always 404
- if hds_url and stream_type != 'wasLive':
- formats.extend(self._extract_f4m_formats(
- hds_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
- video_id, f4m_id='hds'))
+ if hds_url:
+ hdcore_sign = 'hdcore=3.7.0'
+ f4m_formats = self._extract_f4m_formats(
+ hds_url + '?%s' % hdcore_sign, video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ for entry in f4m_formats:
+                    # URLs without the extra param induce a 404 error
+ entry.update({'extra_param_to_segment_url': hdcore_sign})
+ formats.append(entry)
+ mp4_urls = streams.get('pseudostreaming') or []
mp4_url = streams.get('mp4')
if mp4_url:
- _url = hls_url or hds_url
- MP4_URL_TEMPLATE = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1])
- for mp4_format in _url.split(','):
- m = re.search('(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', mp4_format)
- if not m:
- continue
- width = int(m.group('width'))
- height = int(m.group('height'))
- vbr = int(m.group('vbr'))
- formats.append({
- 'url': MP4_URL_TEMPLATE % mp4_format,
- 'format_id': 'mp4-%s' % vbr,
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- 'preference': 1,
+ mp4_urls.append(mp4_url)
+ for mp4_url in mp4_urls:
+ format_info = {
+ 'url': mp4_url,
+ }
+            mobj = re.search(r'(\d+)_(\d+)_(\d+)', mp4_url)
+ if mobj:
+ tbr = int(mobj.group(3))
+ format_info.update({
+ 'width': int(mobj.group(1)),
+ 'height': int(mobj.group(2)),
+ 'tbr': tbr,
+ 'format_id': 'mp4-%s' % tbr,
})
- self._sort_formats(formats)
+ formats.append(format_info)
+
+ info['formats'].extend(formats)
+
+ self._sort_formats(info['formats'])
- return {
+ info.update({
'id': video_id,
- 'title': self._live_title(data['title']),
+ 'title': self._live_title(data['title']) if stream_type == 'live' else data['title'],
'description': data['description'],
'thumbnail': data['images']['main'] + '?t[]=900x506q80',
'timestamp': data['published'],
'duration': float_or_none(data['duration'], 1000),
'view_count': data['displays'],
- 'formats': formats,
'is_live': True if stream_type == 'live' else False,
- }
+ })
+ return info
class BTArticleIE(InfoExtractor):
_VALID_URL = 'http://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html'
_TEST = {
'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html',
- 'md5': 'd055e8ee918ef2844745fcfd1a4175fb',
+ 'md5': '2acbe8ad129b3469d5ae51b1158878df',
'info_dict': {
'id': '23199',
'ext': 'mp4',
def _real_extract(self, url):
webpage = self._download_webpage(url, self._match_id(url))
video_id = self._search_regex(
- r'SVP\.Player\.load\(\s*(\d+)', webpage, 'video id')
- return self.url_result('vgtv:bt:%s' % video_id, 'VGTV')
+ r'<video[^>]+data-id="(\d+)"', webpage, 'video id')
+ return self.url_result('bttv:%s' % video_id, 'VGTV')
class BTVestlendingenIE(InfoExtractor):
IE_NAME = 'bt:vestlendingen'
IE_DESC = 'Bergens Tidende - Vestlendingen'
_VALID_URL = 'http://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588',
'md5': 'd7d17e3337dc80de6d3a540aefbe441b',
'info_dict': {
'timestamp': 1430473209,
'upload_date': '20150501',
},
- }
+ 'skip': '404 Error',
+ }, {
+ 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255',
+ 'md5': 'a2893f8632e96389f4bdf36aa9463ceb',
+ 'info_dict': {
+ 'id': '86255',
+ 'ext': 'mov',
+ 'title': 'Du må tåle å fryse og være sulten',
+ 'description': 'md5:b8046f4d022d5830ddab04865791d063',
+ 'upload_date': '20150321',
+ 'timestamp': 1426942023,
+ },
+ }]
def _real_extract(self, url):
- return self.url_result('xstream:btno:%s' % self._match_id(url), 'Xstream')
+ return self.url_result('bttv:%s' % self._match_id(url), 'VGTV')
'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
'ext': 'mp4',
'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+ 'duration': 725.983,
},
'params': {
# Requires ffmpeg (m3u8 manifest)
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urlparse,
+)
from ..utils import (
float_or_none,
int_or_none,
_VALID_URL = r'https?://(?:www\.)?viddler\.com/(?:v|embed|player)/(?P<id>[a-z0-9]+)'
_TESTS = [{
'url': 'http://www.viddler.com/v/43903784',
- 'md5': 'ae43ad7cb59431ce043f0ff7fa13cbf4',
+ 'md5': '9eee21161d2c7f5b39690c3e325fab2f',
'info_dict': {
'id': '43903784',
- 'ext': 'mp4',
+ 'ext': 'mov',
'title': 'Video Made Easy',
'description': 'md5:6a697ebd844ff3093bd2e82c37b409cd',
'uploader': 'viddler',
}
}, {
'url': 'http://www.viddler.com/v/4d03aad9/',
- 'md5': 'faa71fbf70c0bee7ab93076fd007f4b0',
+ 'md5': 'f12c5a7fa839c47a79363bfdf69404fb',
'info_dict': {
'id': '4d03aad9',
- 'ext': 'mp4',
+ 'ext': 'ts',
'title': 'WALL-TO-GORTAT',
'upload_date': '20150126',
'uploader': 'deadspin',
}
}, {
'url': 'http://www.viddler.com/player/221ebbbd/0/',
- 'md5': '0defa2bd0ea613d14a6e9bd1db6be326',
+ 'md5': '740511f61d3d1bb71dc14a0fe01a1c10',
'info_dict': {
'id': '221ebbbd',
- 'ext': 'mp4',
+ 'ext': 'mov',
'title': 'LETeens-Grammar-snack-third-conditional',
'description': ' ',
'upload_date': '20140929',
'view_count': int,
'comment_count': int,
}
+ }, {
+ # secret protected
+ 'url': 'http://www.viddler.com/v/890c0985?secret=34051570',
+ 'info_dict': {
+ 'id': '890c0985',
+ 'ext': 'mp4',
+ 'title': 'Complete Property Training - Traineeships',
+ 'description': ' ',
+ 'upload_date': '20130606',
+ 'uploader': 'TiffanyBowtell',
+ 'timestamp': 1370496993,
+ 'view_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
video_id = self._match_id(url)
- json_url = (
- 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
- video_id)
+ query = {
+ 'video_id': video_id,
+ 'key': 'v0vhrt7bg2xq1vyxhkct',
+ }
+
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ secret = qs.get('secret', [None])[0]
+ if secret:
+ query['secret'] = secret
+
headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
- request = sanitized_Request(json_url, None, headers)
+ request = sanitized_Request(
+ 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?%s'
+ % compat_urllib_parse.urlencode(query), None, headers)
data = self._download_json(request, video_id)['video']
formats = []
class VideoMegaIE(InfoExtractor):
+ _WORKING = False
_VALID_URL = r'(?:videomega:|https?://(?:www\.)?videomega\.tv/(?:(?:view|iframe|cdn)\.php)?\?ref=)(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA',
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+ parse_iso8601,
+ xpath_text,
+)
+
+
+class VideomoreIE(InfoExtractor):
+ IE_NAME = 'videomore'
+ _VALID_URL = r'videomore:(?P<sid>\d+)$|https?://videomore\.ru/(?:(?:embed|[^/]+/[^/]+)/|[^/]+\?.*\btrack_id=)(?P<id>\d+)(?:[/?#&]|\.(?:xml|json)|$)'
+ _TESTS = [{
+ 'url': 'http://videomore.ru/kino_v_detalayah/5_sezon/367617',
+ 'md5': '70875fbf57a1cd004709920381587185',
+ 'info_dict': {
+ 'id': '367617',
+ 'ext': 'flv',
+ 'title': 'В гостях Алексей Чумаков и Юлия Ковальчук',
+ 'description': 'В гостях – лучшие романтические комедии года, «Выживший» Иньярриту и «Стив Джобс» Дэнни Бойла.',
+ 'series': 'Кино в деталях',
+ 'episode': 'В гостях Алексей Чумаков и Юлия Ковальчук',
+ 'episode_number': None,
+ 'season': 'Сезон 2015',
+ 'season_number': 5,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 2910,
+ 'age_limit': 16,
+ 'view_count': int,
+ },
+ }, {
+ 'url': 'http://videomore.ru/embed/259974',
+ 'info_dict': {
+ 'id': '259974',
+ 'ext': 'flv',
+ 'title': '80 серия',
+ 'description': '«Медведей» ждет решающий матч. Макеев выясняет отношения со Стрельцовым. Парни узнают подробности прошлого Макеева.',
+ 'series': 'Молодежка',
+ 'episode': '80 серия',
+ 'episode_number': 40,
+ 'season': '2 сезон',
+ 'season_number': 2,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 2809,
+ 'age_limit': 16,
+ 'view_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://videomore.ru/molodezhka/sezon_promo/341073',
+ 'info_dict': {
+ 'id': '341073',
+ 'ext': 'flv',
+ 'title': 'Команда проиграла из-за Бакина?',
+ 'description': 'Молодежка 3 сезон скоро',
+ 'series': 'Молодежка',
+ 'episode': 'Команда проиграла из-за Бакина?',
+ 'episode_number': None,
+ 'season': 'Промо',
+ 'season_number': 99,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 29,
+ 'age_limit': 16,
+ 'view_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://videomore.ru/elki_3?track_id=364623',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://videomore.ru/embed/364623',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://videomore.ru/video/tracks/364623.xml',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://videomore.ru/video/tracks/364623.json',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://videomore.ru/video/tracks/158031/quotes/33248',
+ 'only_matching': True,
+ }, {
+ 'url': 'videomore:367617',
+ 'only_matching': True,
+ }]
+
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+            r'<object[^>]+data=(["\'])https?://videomore\.ru/player\.swf\?.*config=(?P<url>https?://videomore\.ru/(?:[^/]+/)+\d+\.xml).*\1',
+ webpage)
+ if mobj:
+ return mobj.group('url')
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('sid') or mobj.group('id')
+
+ video = self._download_xml(
+ 'http://videomore.ru/video/tracks/%s.xml' % video_id,
+ video_id, 'Downloading video XML')
+
+ video_url = xpath_text(video, './/video_url', 'video url', fatal=True)
+ formats = self._extract_f4m_formats(video_url, video_id, f4m_id='hds')
+
+ data = self._download_json(
+ 'http://videomore.ru/video/tracks/%s.json' % video_id,
+ video_id, 'Downloading video JSON')
+
+ title = data.get('title') or data['project_title']
+ description = data.get('description') or data.get('description_raw')
+ timestamp = parse_iso8601(data.get('published_at'))
+ duration = int_or_none(data.get('duration'))
+ view_count = int_or_none(data.get('views'))
+ age_limit = parse_age_limit(data.get('min_age'))
+ thumbnails = [{
+ 'url': thumbnail,
+ } for thumbnail in data.get('big_thumbnail_urls', [])]
+
+ series = data.get('project_title')
+ episode = data.get('title')
+ episode_number = int_or_none(data.get('episode_of_season') or None)
+ season = data.get('season_title')
+ season_number = int_or_none(data.get('season_pos') or None)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'series': series,
+ 'episode': episode,
+ 'episode_number': episode_number,
+ 'season': season,
+ 'season_number': season_number,
+ 'thumbnails': thumbnails,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'age_limit': age_limit,
+ 'formats': formats,
+ }
+
+
+class VideomoreVideoIE(InfoExtractor):
+ IE_NAME = 'videomore:video'
+ _VALID_URL = r'https?://videomore\.ru/(?:(?:[^/]+/){2})?(?P<id>[^/?#&]+)[/?#&]*$'
+ _TESTS = [{
+ # single video with og:video:iframe
+ 'url': 'http://videomore.ru/elki_3',
+ 'info_dict': {
+ 'id': '364623',
+ 'ext': 'flv',
+ 'title': 'Ёлки 3',
+ 'description': '',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 5579,
+ 'age_limit': 6,
+ 'view_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # season single series with og:video:iframe
+ 'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk',
+ 'only_matching': True,
+ }, {
+ # single video without og:video:iframe
+ 'url': 'http://videomore.ru/marin_i_ego_druzya',
+ 'info_dict': {
+ 'id': '359073',
+ 'ext': 'flv',
+ 'title': '1 серия. Здравствуй, Аквавилль!',
+ 'description': 'md5:c6003179538b5d353e7bcd5b1372b2d7',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 754,
+ 'age_limit': 6,
+ 'view_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if VideomoreIE.suitable(url) else super(VideomoreVideoIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_url = self._og_search_property(
+ 'video:iframe', webpage, 'video url', default=None)
+
+ if not video_url:
+ video_id = self._search_regex(
+ (r'config\s*:\s*["\']https?://videomore\.ru/video/tracks/(\d+)\.xml',
+ r'track-id=["\'](\d+)',
+ r'xcnt_product_id\s*=\s*(\d+)'), webpage, 'video id')
+ video_url = 'videomore:%s' % video_id
+
+ return self.url_result(video_url, VideomoreIE.ie_key())
+
+
+class VideomoreSeasonIE(InfoExtractor):
+ IE_NAME = 'videomore:season'
+ _VALID_URL = r'https?://videomore\.ru/(?!embed)(?P<id>[^/]+/[^/?#&]+)[/?#&]*$'
+ _TESTS = [{
+ 'url': 'http://videomore.ru/molodezhka/sezon_promo',
+ 'info_dict': {
+ 'id': 'molodezhka/sezon_promo',
+ 'title': 'Молодежка Промо',
+ },
+ 'playlist_mincount': 12,
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ title = self._og_search_title(webpage)
+
+ entries = [
+ self.url_result(item) for item in re.findall(
+ r'<a[^>]+href="((?:https?:)?//videomore\.ru/%s/[^/]+)"[^>]+class="widget-item-desc"'
+ % display_id, webpage)]
+
+ return self.playlist_result(entries, display_id, title)
webpage_url = 'http://videopremium.tv/' + video_id
webpage = self._download_webpage(webpage_url, video_id)
- if re.match(r"^<html><head><script[^>]*>window.location\s*=", webpage):
+ if re.match(r'^<html><head><script[^>]*>window.location\s*=', webpage):
# Download again, we need a cookie
webpage = self._download_webpage(
webpage_url, video_id,
return {
'id': video_id,
- 'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
- 'play_path': "mp4:%s.f4v" % video_id,
- 'page_url': "http://videopremium.tv/" + video_id,
- 'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+ 'url': 'rtmp://e%d.md.iplay.md/play' % random.randint(1, 16),
+ 'play_path': 'mp4:%s.f4v' % video_id,
+ 'page_url': 'http://videopremium.tv/' + video_id,
+ 'player_url': 'http://videopremium.tv/uplayer/uppod.swf',
'ext': 'f4v',
'title': video_title,
}
class VideoTtIE(InfoExtractor):
+ _WORKING = False
ID_NAME = 'video.tt'
IE_DESC = 'video.tt - Your True Tube'
_VALID_URL = r'http://(?:www\.)?video\.tt/(?:(?:video|embed)/|watch_video\.php\?v=)(?P<id>[\da-zA-Z]{9})'
+++ /dev/null
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class VideoWeedIE(NovaMovIE):
- IE_NAME = 'videoweed'
- IE_DESC = 'VideoWeed'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'}
-
- _HOST = 'www.videoweed.es'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
-
- _TEST = {
- 'url': 'http://www.videoweed.es/file/b42178afbea14',
- 'md5': 'abd31a2132947262c50429e1d16c1bfd',
- 'info_dict': {
- 'id': 'b42178afbea14',
- 'ext': 'flv',
- 'title': 'optical illusion dissapeared image magic illusion',
- 'description': ''
- },
- }
from __future__ import unicode_literals
+import itertools
+
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
class VidmeIE(InfoExtractor):
- _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
+ IE_NAME = 'vidme'
+ _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{,5})(?:[^\da-zA-Z]|$)'
_TESTS = [{
'url': 'https://vid.me/QNB',
'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
'comment_count': comment_count,
'formats': formats,
}
+
+
+class VidmeListBaseIE(InfoExtractor):
+ # Max possible limit according to https://docs.vid.me/#api-Videos-List
+ _LIMIT = 100
+
+ def _entries(self, user_id, user_name):
+ for page_num in itertools.count(1):
+ page = self._download_json(
+ 'https://api.vid.me/videos/%s?user=%s&limit=%d&offset=%d'
+ % (self._API_ITEM, user_id, self._LIMIT, (page_num - 1) * self._LIMIT),
+ user_name, 'Downloading user %s page %d' % (self._API_ITEM, page_num))
+
+ videos = page.get('videos', [])
+ if not videos:
+ break
+
+ for video in videos:
+ video_url = video.get('full_url') or video.get('embed_url')
+ if video_url:
+ yield self.url_result(video_url, VidmeIE.ie_key())
+
+ total = int_or_none(page.get('page', {}).get('total'))
+ if total and self._LIMIT * page_num >= total:
+ break
+
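The pagination above steps through offsets of _LIMIT at a time and stops once the reported total is covered; a toy trace (the total is hypothetical):

    LIMIT, total = 100, 238
    pages = []
    for page_num in range(1, 10):
        pages.append((page_num, (page_num - 1) * LIMIT))  # page -> API offset
        if LIMIT * page_num >= total:
            break
    assert pages == [(1, 0), (2, 100), (3, 200)]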
+ def _real_extract(self, url):
+ user_name = self._match_id(url)
+
+ user_id = self._download_json(
+ 'https://api.vid.me/userByUsername?username=%s' % user_name,
+ user_name)['user']['user_id']
+
+ return self.playlist_result(
+ self._entries(user_id, user_name), user_id,
+ '%s - %s' % (user_name, self._TITLE))
+
+
+class VidmeUserIE(VidmeListBaseIE):
+ IE_NAME = 'vidme:user'
+ _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{6,})(?!/likes)(?:[^\da-zA-Z]|$)'
+ _API_ITEM = 'list'
+ _TITLE = 'Videos'
+ _TEST = {
+ 'url': 'https://vid.me/EFARCHIVE',
+ 'info_dict': {
+ 'id': '3834632',
+ 'title': 'EFARCHIVE - %s' % _TITLE,
+ },
+ 'playlist_mincount': 238,
+ }
+
+
+class VidmeUserLikesIE(VidmeListBaseIE):
+ IE_NAME = 'vidme:user:likes'
+ _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{6,})/likes'
+ _API_ITEM = 'likes'
+ _TITLE = 'Likes'
+ _TEST = {
+ 'url': 'https://vid.me/ErinAlexis/likes',
+ 'info_dict': {
+ 'id': '6483530',
+ 'title': 'ErinAlexis - %s' % _TITLE,
+ },
+ 'playlist_mincount': 415,
+ }
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import smuggle_url
class VidziIE(InfoExtractor):
'id': 'cghql9yq6emu',
'ext': 'mp4',
'title': 'youtube-dl test video 1\\\\2\'3/4<5\\\\6ä7↭',
+ 'uploader': 'vidzi.tv',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
},
}
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- video_host = self._html_search_regex(
- r'id=\'vplayer\'><img src="http://(.*?)/i', webpage,
- 'video host')
- video_hash = self._html_search_regex(
- r'\|([a-z0-9]+)\|hls\|type', webpage, 'video_hash')
- ext = self._html_search_regex(
- r'\|tracks\|([a-z0-9]+)\|', webpage, 'video ext')
- video_url = 'http://' + video_host + '/' + video_hash + '/v.' + ext
title = self._html_search_regex(
r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
+ # Vidzi now uses jwplayer, which can be handled by GenericIE
return {
+ '_type': 'url_transparent',
'id': video_id,
'title': title,
- 'url': video_url,
+ 'url': smuggle_url(url, {'to_generic': True}),
+ 'ie_key': 'Generic',
}
self._sort_formats(formats)
- synopsis = info.get('Synopsis', {})
+ synopsis = info.get('Synopsis') or {}
# Prefer title outside synopsis since it's less messy
title = (info.get('Title') or synopsis['Title']).strip()
- description = synopsis.get('Detailed') or info.get('Synopsis', {}).get('Short')
+ description = synopsis.get('Detailed') or (info.get('Synopsis') or {}).get('Short')
duration = int_or_none(info.get('Duration'))
timestamp = parse_iso8601(info.get('ReleaseDate'))
'upload_date': '20130627',
'duration': 565,
},
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}, {
# video with invalid direct format links (HTTP 403)
'url': 'http://videolectures.net/russir2010_filippova_nlp/',
_token = None
+ _ERRORS = {
+ 'geo': 'Sorry, this content is not available in your region.',
+ 'upcoming': 'Sorry, this content is not yet available.',
+ # 'paywall': 'paywall',
+ }
+
def _prepare_call(self, path, timestamp=None, post_data=None):
path += '?' if '?' not in path else '&'
if not timestamp:
'%s returned error: %s' % (self.IE_NAME, error),
expected=True)
+ def _check_errors(self, data):
+ for reason, status in data.get('blocking', {}).items():
+ if status and reason in self._ERRORS:
+ raise ExtractorError('%s said: %s' % (
+ self.IE_NAME, self._ERRORS[reason]), expected=True)
+
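A toy payload showing what _check_errors reacts to (the blocking flags are illustrative):

    data = {'blocking': {'geo': True, 'upcoming': False, 'paywall': True}}
    blocked = [reason for reason, status in data.get('blocking', {}).items()
               if status and reason in ('geo', 'upcoming')]
    assert blocked == ['geo']  # only known, truthy reasons raise ExtractorError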
def _real_initialize(self):
self._login()
'timestamp': 1321985454,
'description': 'md5:44b1e46619df3a072294645c770cef36',
'title': 'Love In Magic',
+ 'age_limit': 13,
},
}]
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
+ self._check_errors(video)
+
title = self.dict_selection(video.get('titles', {}), 'en')
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
if format_id == 'm3u8':
- formats = self._extract_m3u8_formats(
- format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol)
+ formats.extend(self._extract_m3u8_formats(
+ format_dict['url'], video_id, 'mp4', 'm3u8_native',
+ m3u8_id='m3u8-%s' % protocol, fatal=False))
else:
formats.append({
'url': format_dict['url'],
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
+ self._check_errors(channel)
+
title = self.dict_selection(channel['titles'], 'en')
description = self.dict_selection(channel['descriptions'], 'en')
compat_urlparse,
)
from ..utils import (
+ determine_ext,
encode_dict,
ExtractorError,
InAdvancePagedList,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
+ parse_filesize,
)
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
- r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
+ r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
'uploader_id': 'user28849593',
},
},
+ {
+ # contains original format
+ 'url': 'https://vimeo.com/33951933',
+ 'md5': '53c688fa95a55bf4b7293d37a89c5c53',
+ 'info_dict': {
+ 'id': '33951933',
+ 'ext': 'mp4',
+ 'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
+ 'uploader': 'The DMCI',
+ 'uploader_id': 'dmci',
+ 'upload_date': '20111220',
+ 'description': 'md5:ae23671e82d05415868f7ad1aec21147',
+ },
+ },
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
+ {
+ # source file returns 403: Forbidden
+ 'url': 'https://vimeo.com/7809605',
+ 'only_matching': True,
+ },
]
@staticmethod
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
if mobj:
player_url = unescapeHTML(mobj.group('url'))
- surl = smuggle_url(player_url, {'Referer': url})
+ surl = smuggle_url(player_url, {'http_headers': {'Referer': url}})
return surl
# Look for embedded (swf embed) Vimeo player
mobj = re.search(
return mobj.group(1)
def _verify_video_password(self, url, video_id, webpage):
- password = self._downloader.params.get('videopassword', None)
+ password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
'Verifying the password', 'Wrong password')
def _verify_player_video_password(self, url, video_id):
- password = self._downloader.params.get('videopassword', None)
+ password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option')
data = urlencode_postdata(encode_dict({'password': password}))
self._login()
def _real_extract(self, url):
- url, data = unsmuggle_url(url)
+ url, data = unsmuggle_url(url, {})
headers = std_headers
- if data is not None:
+ if 'http_headers' in data:
headers = headers.copy()
- headers.update(data)
+ headers.update(data['http_headers'])
if 'Referer' not in headers:
headers['Referer'] = url
raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
- if data and '_video_password_verified' in data:
+ if '_video_password_verified' in data:
raise ExtractorError('video password verification failed!')
self._verify_video_password(url, video_id, webpage)
return self._real_extract(
if config.get('view') == 4:
config = self._verify_player_video_password(url, video_id)
+ if '>You rented this title.<' in webpage:
+ feature_id = config.get('video', {}).get('vod', {}).get('feature_id')
+ if feature_id and not data.get('force_feature_id', False):
+ return self.url_result(smuggle_url(
+ 'https://player.vimeo.com/player/%s' % feature_id,
+ {'force_feature_id': True}), 'Vimeo')
+
# Extract title
- video_title = config["video"]["title"]
+ video_title = config['video']['title']
# Extract uploader and uploader_id
- video_uploader = config["video"]["owner"]["name"]
- video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
+ video_uploader = config['video']['owner']['name']
+ video_uploader_id = config['video']['owner']['url'].split('/')[-1] if config['video']['owner']['url'] else None
# Extract video thumbnail
- video_thumbnail = config["video"].get("thumbnail")
+ video_thumbnail = config['video'].get('thumbnail')
if video_thumbnail is None:
- video_thumbs = config["video"].get("thumbs")
+ video_thumbs = config['video'].get('thumbs')
if video_thumbs and isinstance(video_thumbs, dict):
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
self._downloader.report_warning('Cannot find video description')
# Extract video duration
- video_duration = int_or_none(config["video"].get("duration"))
+ video_duration = int_or_none(config['video'].get('duration'))
# Extract upload date
video_upload_date = None
comment_count = None
formats = []
+ download_request = sanitized_Request('https://vimeo.com/%s?action=load_download_config' % video_id, headers={
+ 'X-Requested-With': 'XMLHttpRequest'})
+ download_data = self._download_json(download_request, video_id, fatal=False)
+ if download_data:
+ source_file = download_data.get('source_file')
+ if isinstance(source_file, dict):
+ download_url = source_file.get('download_url')
+ if download_url and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
+ source_name = source_file.get('public_name', 'Original')
+ if self._is_valid_url(download_url, video_id, '%s video' % source_name):
+ ext = source_file.get('extension', determine_ext(download_url)).lower()
+ formats.append({
+ 'url': download_url,
+ 'ext': ext,
+ 'width': int_or_none(source_file.get('width')),
+ 'height': int_or_none(source_file.get('height')),
+ 'filesize': parse_filesize(source_file.get('size')),
+ 'format_id': source_name,
+ 'preference': 1,
+ })
config_files = config['video'].get('files') or config['request'].get('files', {})
for f in config_files.get('progressive', []):
video_url = f.get('url')
})
m3u8_url = config_files.get('hls', {}).get('url')
if m3u8_url:
- m3u8_formats = self._extract_m3u8_formats(
- m3u8_url, video_id, 'mp4', 'm3u8_native', 0, 'hls', fatal=False)
- if m3u8_formats:
- formats.extend(m3u8_formats)
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
# Bitrates are completely broken. A single m3u8 may contain entries in kbps and bps
# at the same time without actual units specified. This leads to wrong sorting.
- self._sort_formats(formats, field_preference=('height', 'width', 'fps', 'format_id'))
+ self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id'))
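A sketch of why 'preference' now leads the sort fields: with the mixed-unit bitrates described above, raw tbr comparisons are meaningless, but the original source file (added with preference=1) should still win (numbers illustrative):

    formats = [
        {'format_id': 'hls-1080p', 'tbr': 5000000, 'preference': 0},  # bps, really ~5000 kbps
        {'format_id': 'Original', 'tbr': 8000, 'preference': 1},      # kbps
    ]
    formats.sort(key=lambda f: (f['preference'], f['tbr']))  # ascending; last is best
    assert formats[-1]['format_id'] == 'Original'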
subtitles = {}
text_tracks = config['request'].get('text_tracks')
if not login_form:
return webpage
- password = self._downloader.params.get('videopassword', None)
+ password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
_TEST = {
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
- "info_dict": {
+ 'info_dict': {
'id': 'user755559_likes',
- "description": "See all the videos urza likes",
- "title": 'Videos urza likes',
+ 'description': 'See all the videos urza likes',
+ 'title': 'Videos urza likes',
},
}
class VineUserIE(InfoExtractor):
IE_NAME = 'vine:user'
_VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
- _VINE_BASE_URL = "https://vine.co/"
+ _VINE_BASE_URL = 'https://vine.co/'
_TESTS = [
{
'url': 'https://vine.co/Visa',
user = mobj.group('user')
u = mobj.group('u')
- profile_url = "%sapi/users/profiles/%s%s" % (
+ profile_url = '%sapi/users/profiles/%s%s' % (
self._VINE_BASE_URL, 'vanity/' if not u else '', user)
profile_data = self._download_json(
profile_url, user, note='Downloading user profile data')
user_id = profile_data['data']['userId']
timeline_data = []
for pagenum in itertools.count(1):
- timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
+ timeline_url = '%sapi/timelines/users/%s?page=%s&size=100' % (
self._VINE_BASE_URL, user_id, pagenum)
timeline_page = self._download_json(
timeline_url, user, note='Downloading page %d' % pagenum)
unified_strdate,
)
from .vimeo import VimeoIE
+from .pladform import PladformIE
class VKIE(InfoExtractor):
# vk wrapper
'url': 'http://www.biqle.ru/watch/847655_160197695',
'only_matching': True,
+ },
+ {
+ # pladform embed
+ 'url': 'https://vk.com/video-76116461_171554880',
+ 'only_matching': True,
}
]
if vimeo_url is not None:
return self.url_result(vimeo_url)
+ pladform_url = PladformIE._extract_url(info_page)
+ if pladform_url:
+ return self.url_result(pladform_url)
+
m_rutube = re.search(
- r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page)
+ r'\ssrc="((?:https?:)?//rutube\.ru\\?/(?:video|play)\\?/embed(?:.*?))\\?"', info_page)
if m_rutube is not None:
- self.to_screen('rutube video detected')
rutube_url = self._proto_relative_url(
m_rutube.group(1).replace('\\', ''))
return self.url_result(rutube_url)
class VKUserVideosIE(InfoExtractor):
IE_NAME = 'vk:uservideos'
IE_DESC = "VK - User's Videos"
- _VALID_URL = r'https?://vk\.com/videos(?P<id>-?[0-9]+)$'
+ _VALID_URL = r'https?://vk\.com/videos(?P<id>-?[0-9]+)(?!\?.*\bz=video)(?:[/?#&]|$)'
_TEMPLATE_URL = 'https://vk.com/videos'
_TESTS = [{
'url': 'http://vk.com/videos205387401',
}, {
'url': 'http://vk.com/videos-77521',
'only_matching': True,
+ }, {
+ 'url': 'http://vk.com/videos-97664626?section=all',
+ 'only_matching': True,
}]
def _real_extract(self, url):
# coding: utf-8
from __future__ import unicode_literals
-import hmac
-from hashlib import sha1
-from base64 import b64encode
-from time import time
-
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
- determine_ext
+ dict_get,
+ float_or_none,
+ int_or_none,
)
from ..compat import compat_urllib_parse
class VLiveIE(InfoExtractor):
IE_NAME = 'vlive'
- # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
- _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
+ _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
_TEST = {
- 'url': 'http://m.vlive.tv/video/1326',
+ 'url': 'http://www.vlive.tv/video/1326',
'md5': 'cc7314812855ce56de70a06a27314983',
'info_dict': {
'id': '1326',
'ext': 'mp4',
- 'title': '[V] Girl\'s Day\'s Broadcast',
- 'creator': 'Girl\'s Day',
+ 'title': "[V] Girl's Day's Broadcast",
+ 'creator': "Girl's Day",
+ 'view_count': int,
},
}
- _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
- 'http://m.vlive.tv/video/%s' % video_id,
- video_id, note='Download video page')
+ 'http://www.vlive.tv/video/%s' % video_id, video_id)
- title = self._og_search_title(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
- creator = self._html_search_regex(
- r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+ long_video_id = self._search_regex(
+ r'vlive\.tv\.video\.ajax\.request\.handler\.init\(\s*"[0-9]+"\s*,\s*"[^"]*"\s*,\s*"([^"]+)"',
+ webpage, 'long video id')
- url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
- msgpad = '%.0f' % (time() * 1000)
- md = b64encode(
- hmac.new(self._SECRET.encode('ascii'),
- (url[:255] + msgpad).encode('ascii'), sha1).digest()
- )
- url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
- playinfo = self._download_json(url, video_id, 'Downloading video json')
+ key = self._search_regex(
+ r'vlive\.tv\.video\.ajax\.request\.handler\.init\(\s*"[0-9]+"\s*,\s*"[^"]*"\s*,\s*"[^"]+"\s*,\s*"([^"]+)"',
+ webpage, 'key')
- if playinfo.get('message', '') != 'success':
- raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
+ title = self._og_search_title(webpage)
- if not playinfo.get('result'):
- raise ExtractorError('No videos found.')
+ playinfo = self._download_json(
+ 'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
+ % compat_urllib_parse.urlencode({
+ 'videoId': long_video_id,
+ 'key': key,
+ 'ptc': 'http',
+ 'doct': 'json', # document type (xml or json)
+ 'cpt': 'vtt', # captions type (vtt or ttml)
+ }), video_id)
- formats = []
- for vid in playinfo['result'].get('videos', {}).get('list', []):
- formats.append({
- 'url': vid['source'],
- 'ext': 'mp4',
- 'abr': vid.get('bitrate', {}).get('audio'),
- 'vbr': vid.get('bitrate', {}).get('video'),
- 'format_id': vid['encodingOption']['name'],
- 'height': vid.get('height'),
- 'width': vid.get('width'),
- })
+ formats = [{
+ 'url': vid['source'],
+ 'format_id': vid.get('encodingOption', {}).get('name'),
+ 'abr': float_or_none(vid.get('bitrate', {}).get('audio')),
+ 'vbr': float_or_none(vid.get('bitrate', {}).get('video')),
+ 'width': int_or_none(vid.get('encodingOption', {}).get('width')),
+ 'height': int_or_none(vid.get('encodingOption', {}).get('height')),
+ 'filesize': int_or_none(vid.get('size')),
+ } for vid in playinfo.get('videos', {}).get('list', []) if vid.get('source')]
self._sort_formats(formats)
+ thumbnail = self._og_search_thumbnail(webpage)
+ creator = self._html_search_regex(
+ r'<div[^>]+class="info_area"[^>]*>\s*<strong[^>]+class="name"[^>]*>([^<]+)</strong>',
+ webpage, 'creator', fatal=False)
+
+ view_count = int_or_none(playinfo.get('meta', {}).get('count'))
+
subtitles = {}
- for caption in playinfo['result'].get('captions', {}).get('list', []):
- subtitles[caption['language']] = [
- {'ext': determine_ext(caption['source'], default_ext='vtt'),
- 'url': caption['source']}]
+ for caption in playinfo.get('captions', {}).get('list', []):
+ lang = dict_get(caption, ('language', 'locale', 'country', 'label'))
+ if lang and caption.get('source'):
+ subtitles[lang] = [{
+ 'ext': 'vtt',
+ 'url': caption['source']}]
return {
'id': video_id,
'title': title,
'creator': creator,
'thumbnail': thumbnail,
+ 'view_count': view_count,
'formats': formats,
'subtitles': subtitles,
}
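A hedged sketch of the playinfo request assembled above (the id and key values are made up; compat_urllib_parse.urlencode maps to the stdlib urlencode):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2
    query = urlencode({
        'videoId': 'EXAMPLE_LONG_ID',
        'key': 'EXAMPLE_KEY',
        'ptc': 'http',
        'doct': 'json',  # document type (xml or json)
        'cpt': 'vtt',    # captions type (vtt or ttml)
    })
    playinfo_url = ('http://global.apis.naver.com/rmcnmv/rmcnmv/'
                    'vod_play_videoInfo.json?%s' % query)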
from .common import InfoExtractor
from ..compat import compat_urllib_parse
-from ..utils import sanitized_Request
+from ..utils import (
+ ExtractorError,
+ NO_DEFAULT,
+ sanitized_Request,
+)
class VodlockerIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
+ _VALID_URL = r'https?://(?:www\.)?vodlocker\.(?:com|city)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
_TESTS = [{
'url': 'http://vodlocker.com/e8wvyzz4sl42',
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
+ if any(p in webpage for p in (
+ '>THIS FILE WAS DELETED<',
+ '>File Not Found<',
+ 'The file you were looking for could not be found, sorry for any inconvenience.<')):
+ raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
fields = self._hidden_inputs(webpage)
if fields['op'] == 'download1':
webpage = self._download_webpage(
req, video_id, 'Downloading video page')
+ def extract_file_url(html, default=NO_DEFAULT):
+ return self._search_regex(
+ r'file:\s*"(http[^\"]+)",', html, 'file url', default=default)
+
+ video_url = extract_file_url(webpage, default=None)
+
+ if not video_url:
+ embed_url = self._search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?vodlocker\.(?:com|city)/embed-.+?)\1',
+ webpage, 'embed url', group='url')
+ embed_webpage = self._download_webpage(
+ embed_url, video_id, 'Downloading embed webpage')
+ video_url = extract_file_url(embed_webpage)
+ thumbnail_webpage = embed_webpage
+ else:
+ thumbnail_webpage = webpage
+
title = self._search_regex(
r'id="file_title".*?>\s*(.*?)\s*<(?:br|span)', webpage, 'title')
thumbnail = self._search_regex(
- r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
- url = self._search_regex(
- r'file:\s*"(http[^\"]+)",', webpage, 'file url')
+ r'image:\s*"(http[^\"]+)",', thumbnail_webpage, 'thumbnail', fatal=False)
formats = [{
'format_id': 'sd',
- 'url': url,
+ 'url': video_url,
}]
return {
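
The extract_file_url helper above uses the NO_DEFAULT sentinel so one function can serve both call sites: with default=None it probes the watch page quietly, while the bare call on the embed page still raises when nothing matches. A rough sketch of the sentinel pattern, assuming semantics like youtube-dl's _search_regex (not its actual implementation):

    import re

    NO_DEFAULT = object()  # sentinel: distinguishes "no default given" from default=None

    def search_regex(pattern, text, default=NO_DEFAULT):
        m = re.search(pattern, text)
        if m:
            return m.group(1)
        if default is NO_DEFAULT:
            raise ValueError('unable to extract')  # fatal when no default supplied
        return default

    print(search_regex(r'file:\s*"(http[^"]+)"', 'nothing here', default=None))  # None
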
class VRTIE(InfoExtractor):
- _VALID_URL = r'https?://(?:deredactie|sporza|cobra)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
+ _VALID_URL = r'https?://(?:deredactie|sporza|cobra(?:\.canvas)?)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
_TESTS = [
# deredactie.be
{
'duration': 661,
}
},
+ {
+ 'url': 'http://cobra.canvas.be/cm/cobra/videozone/rubriek/film-videozone/1.2377055',
+ 'only_matching': True,
+ }
]
def _real_extract(self, url):
if mobj:
formats.extend(self._extract_m3u8_formats(
'%s/%s' % (mobj.group('server'), mobj.group('path')),
- video_id, 'mp4'))
+ video_id, 'mp4', m3u8_id='hls', fatal=False))
mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
if mobj:
formats.extend(self._extract_f4m_formats(
- '%s/manifest.f4m' % mobj.group('src'), video_id))
+ '%s/manifest.f4m' % mobj.group('src'),
+ video_id, f4m_id='hds', fatal=False))
+
+ if not formats and 'data-video-geoblocking="true"' in webpage:
+ self.raise_geo_restricted('This video is only available in Belgium')
+
self._sort_formats(formats)
title = self._og_search_title(webpage)
class WatIE(InfoExtractor):
- _VALID_URL = r'http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html'
+ _VALID_URL = r'(?:wat:(?P<real_id>\d{8})|http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html)'
IE_NAME = 'wat.tv'
_TESTS = [
{
def real_id_for_chapter(chapter):
return chapter['tc_start'].split('-')[0]
mobj = re.match(self._VALID_URL, url)
- short_id = mobj.group('short_id')
display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, display_id or short_id)
- real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
+ real_id = mobj.group('real_id')
+ if not real_id:
+ short_id = mobj.group('short_id')
+ webpage = self._download_webpage(url, display_id or short_id)
+ real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
video_info = self.download_video_info(real_id)
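
The reworked _VALID_URL also accepts an internal wat:XXXXXXXX form, so callers that already know the 8-digit id can skip the webpage download entirely. A quick check against a hypothetical id and URL:

    import re

    _VALID_URL = r'(?:wat:(?P<real_id>\d{8})|http://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html)'
    print(re.match(_VALID_URL, 'wat:11713067').group('real_id'))  # 11713067 (hypothetical id)
    print(re.match(_VALID_URL, 'http://www.wat.tv/video/some-video-abc1e_2abcd.html').group('short_id'))  # abc1e
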
compat_urlparse,
)
from ..utils import (
- determine_ext,
unified_strdate,
+ qualities,
)
'params': {
'skip_download': True,
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html',
'params': {
'skip_download': True,
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html',
'upload_date': '20140717',
'is_live': False
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html',
'info_dict': {
'id': 'mdb-103364',
- 'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'title': 're:^WDR Fernsehen Live [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9',
'ext': 'flv',
- 'upload_date': '20150212',
+ 'upload_date': '20150101',
'is_live': True
},
'params': {
if mobj.group('player') is None:
entries = [
self.url_result(page_url + href, 'WDR')
- for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage)
+ for href in re.findall(
+ r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX,
+ webpage)
]
if entries: # Playlist page
note='Downloading playlist page %d' % page_num)
return self.playlist_result(entries, page_id)
- flashvars = compat_parse_qs(
- self._html_search_regex(r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars'))
+ flashvars = compat_parse_qs(self._html_search_regex(
+ r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars'))
page_id = flashvars['trackerClipId'][0]
video_url = flashvars['dslSrc'][0]
if 'trackerClipAirTime' in flashvars:
upload_date = flashvars['trackerClipAirTime'][0]
else:
- upload_date = self._html_search_meta('DC.Date', webpage, 'upload date')
+ upload_date = self._html_search_meta(
+ 'DC.Date', webpage, 'upload date')
if upload_date:
upload_date = unified_strdate(upload_date)
+ formats = []
+ preference = qualities(['S', 'M', 'L', 'XL'])
+
if video_url.endswith('.f4m'):
- video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18'
- ext = 'flv'
+ formats.extend(self._extract_f4m_formats(
+ video_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', page_id,
+ f4m_id='hds', fatal=False))
elif video_url.endswith('.smil'):
- fmt = self._extract_smil_formats(video_url, page_id)[0]
- video_url = fmt['url']
- sep = '&' if '?' in video_url else '?'
- video_url += sep
- video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43'
- ext = fmt['ext']
+ formats.extend(self._extract_smil_formats(
+ video_url, page_id, False, {
+ 'hdcore': '3.3.0',
+ 'plugin': 'aasp-3.3.0.99.43',
+ }))
else:
- ext = determine_ext(video_url)
+ formats.append({
+ 'url': video_url,
+ 'http_headers': {
+ 'User-Agent': 'mobile',
+ },
+ })
+
+ m3u8_url = self._search_regex(
+ r'rel="adaptiv"[^>]+href="([^"]+)"',
+ webpage, 'm3u8 url', default=None)
+ if m3u8_url:
+ formats.extend(self._extract_m3u8_formats(
+ m3u8_url, page_id, 'mp4', 'm3u8_native',
+ m3u8_id='hls', fatal=False))
+
+ direct_urls = re.findall(
+ r'rel="web(S|M|L|XL)"[^>]+href="([^"]+)"', webpage)
+ if direct_urls:
+ for quality, video_url in direct_urls:
+ formats.append({
+ 'url': video_url,
+ 'preference': preference(quality),
+ 'http_headers': {
+ 'User-Agent': 'mobile',
+ },
+ })
+
+ self._sort_formats(formats)
description = self._html_search_meta('Description', webpage, 'description')
return {
'id': page_id,
- 'url': video_url,
- 'ext': ext,
+ 'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
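
The direct web(S|M|L|XL) links are ranked through the qualities helper built above. A sketch of how such a helper can work, assuming youtube-dl's semantics (position in the list is the preference, unknown ids sort last):

    def qualities(quality_ids):
        def q(qid):
            try:
                return quality_ids.index(qid)  # later in the list -> preferred
            except ValueError:
                return -1                      # unknown quality ranks lowest
        return q

    preference = qualities(['S', 'M', 'L', 'XL'])
    print(preference('XL'), preference('M'), preference('??'))  # 3 1 -1
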
--- /dev/null
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class WeiqiTVIE(InfoExtractor):
+ IE_DESC = 'WQTV'
+ _VALID_URL = r'http://www\.weiqitv\.com/index/video_play\?videoId=(?P<id>[A-Za-z0-9]+)'
+
+ _TESTS = [{
+ 'url': 'http://www.weiqitv.com/index/video_play?videoId=53c744f09874f0e76a8b46f3',
+ 'md5': '26450599afd64c513bc77030ad15db44',
+ 'info_dict': {
+ 'id': '53c744f09874f0e76a8b46f3',
+ 'ext': 'mp4',
+ 'title': '2013年度盘点',
+ },
+ }, {
+ 'url': 'http://www.weiqitv.com/index/video_play?videoId=567379a2d4c36cca518b4569',
+ 'info_dict': {
+ 'id': '567379a2d4c36cca518b4569',
+ 'ext': 'mp4',
+ 'title': '民国围棋史',
+ },
+ }, {
+ 'url': 'http://www.weiqitv.com/index/video_play?videoId=5430220a9874f088658b4567',
+ 'info_dict': {
+ 'id': '5430220a9874f088658b4567',
+ 'ext': 'mp4',
+ 'title': '二路托过的手段和运用',
+ },
+ }]
+
+ def _real_extract(self, url):
+ media_id = self._match_id(url)
+ page = self._download_webpage(url, media_id)
+
+ info_json_str = self._search_regex(
+            r'var\s+video\s*=\s*(.+});', page, 'info json str')
+ info_json = self._parse_json(info_json_str, media_id)
+
+ letvcloud_url = self._search_regex(
+            r'var\s+letvurl\s*=\s*"([^"]+)', page, 'letvcloud url')
+
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'LetvCloud',
+ 'url': letvcloud_url,
+ 'title': info_json['name'],
+ 'id': media_id,
+ }
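
Since the new extractor only resolves metadata and then defers playback to the LetvCloud extractor via a url_transparent result, the page scraping boils down to the two regexes above. A toy run against a hypothetical page snippet:

    import re

    page = 'var video = {"name": "demo"}; var letvurl = "http://example.invalid/letv";'
    info_json_str = re.search(r'var\s+video\s*=\s*(.+});', page).group(1)
    letvcloud_url = re.search(r'var\s+letvurl\s*=\s*"([^"]+)', page).group(1)
    print(info_json_str, letvcloud_url)  # {"name": "demo"} http://example.invalid/letv
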
class WimpIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)/'
+ _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
'md5': 'ee21217ffd66d058e8b16be340b74883',
def _real_extract(self, url):
video_id = self._match_id(url)
+
webpage = self._download_webpage(url, video_id)
- video_url = self._search_regex(
- [r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
- webpage, 'video URL')
- if YoutubeIE.suitable(video_url):
- self.to_screen('Found YouTube video')
+
+ youtube_id = self._search_regex(
+ r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']",
+ webpage, 'video URL', default=None)
+ if youtube_id:
return {
'_type': 'url',
- 'url': video_url,
+ 'url': youtube_id,
'ie_key': YoutubeIE.ie_key(),
}
+ video_url = self._search_regex(
+ r'<video[^>]+>\s*<source[^>]+src=(["\'])(?P<url>.+?)\1',
+ webpage, 'video URL', group='url')
+
return {
'id': video_id,
'url': video_url,
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?v=(?P<id>.*)'
_TESTS = [{
- "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
- "md5": "9d04de741161603bf7071bbf4e883186",
- "info_dict": {
- "id": "wshh6a7q1ny0G34ZwuIO",
- "ext": "mp4",
- "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+ 'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
+ 'md5': '9d04de741161603bf7071bbf4e883186',
+ 'info_dict': {
+ 'id': 'wshh6a7q1ny0G34ZwuIO',
+ 'ext': 'mp4',
+ 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}, {
'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
'info_dict': {
'id': 'wshh6a7q1ny0G34ZwuIO',
'ext': 'mp4',
- "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+ 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}]
from .common import InfoExtractor
from ..utils import (
- unified_strdate,
- str_to_int,
+ float_or_none,
int_or_none,
- parse_duration,
+ unified_strdate,
)
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
'uploader': 'Ruseful2011',
- 'duration': 893,
+ 'duration': 893.52,
'age_limit': 18,
}
},
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
'uploader': 'jojo747400',
- 'duration': 200,
+ 'duration': 200.48,
'age_limit': 18,
}
},
webpage = self._download_webpage(mrss_url, video_id)
title = self._html_search_regex(
- [r'<title>(?P<title>.+?)(?:, (?:[^,]+? )?Porn: xHamster| - xHamster\.com)</title>',
- r'<h1>([^<]+)</h1>'], webpage, 'title')
+ [r'<h1[^>]*>([^<]+)</h1>',
+ r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
+ r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
+ webpage, 'title')
    # Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
description = mobj.group(1) if mobj else None
- upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
- webpage, 'upload date', fatal=False)
- if upload_date:
- upload_date = unified_strdate(upload_date)
+ upload_date = unified_strdate(self._search_regex(
+ r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
+ webpage, 'upload date', fatal=False))
uploader = self._html_search_regex(
- r"<a href='[^']+xhamster\.com/user/[^>]+>(?P<uploader>[^<]+)",
+ r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+href=["\'].+?xhamster\.com/user/[^>]+>(?P<uploader>.+?)</a>',
webpage, 'uploader', default='anonymous')
thumbnail = self._search_regex(
r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
webpage, 'thumbnail', fatal=False, group='thumbnail')
- duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
- webpage, 'duration', fatal=False))
+ duration = float_or_none(self._search_regex(
+ r'(["\'])duration\1\s*:\s*(["\'])(?P<duration>.+?)\2',
+ webpage, 'duration', fatal=False, group='duration'))
- view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
- if view_count:
- view_count = str_to_int(view_count)
+ view_count = int_or_none(self._search_regex(
+ r'content=["\']User(?:View|Play)s:(\d+)',
+ webpage, 'view count', fatal=False))
mobj = re.search(r"hint='(?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes'", webpage)
(like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
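
Several of the new patterns here use a captured quote plus backreference — (["\'])...\1 — so the same regex tolerates either quoting style in the page's inline JS. For example (hypothetical snippet):

    import re

    snippet = "clip = {'duration': '893.52'}"
    m = re.search(r'(["\'])duration\1\s*:\s*(["\'])(?P<duration>.+?)\2', snippet)
    print(float(m.group('duration')))  # 893.52
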
'only_matching': True,
}]
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- partner_id = mobj.group('partner_id')
- video_id = mobj.group('id')
-
+ def _extract_video_info(self, partner_id, video_id):
data = self._download_xml(
'http://frontend.xstream.dk/%s/feed/video/?platform=web&id=%s'
% (partner_id, video_id),
formats.append({
'url': link.get('href'),
'format_id': link.get('rel'),
+ 'preference': 1,
})
thumbnails = [{
'formats': formats,
'thumbnails': thumbnails,
}
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ partner_id = mobj.group('partner_id')
+ video_id = mobj.group('id')
+
+ return self._extract_video_info(partner_id, video_id)
from __future__ import unicode_literals
+import itertools
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
- parse_duration,
+ int_or_none,
+ orderedSet,
sanitized_Request,
str_to_int,
)
class XTubeIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>xtube\.com/watch\.php\?v=(?P<id>[^/?&#]+))'
- _TEST = {
+ _VALID_URL = r'(?:xtube:|https?://(?:www\.)?xtube\.com/(?:watch\.php\?.*\bv=|video-watch/(?P<display_id>[^/]+)-))(?P<id>[^/?&#]+)'
+
+ _TESTS = [{
+ # old URL schema
'url': 'http://www.xtube.com/watch.php?v=kVTUy_G222_',
'md5': '092fbdd3cbe292c920ef6fc6a8a9cdab',
'info_dict': {
'duration': 450,
'age_limit': 18,
}
- }
+ }, {
+ # new URL schema
+ 'url': 'http://www.xtube.com/video-watch/strange-erotica-625837',
+ 'only_matching': True,
+ }, {
+ 'url': 'xtube:625837',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
+
+ if not display_id:
+ display_id = video_id
+ url = 'http://www.xtube.com/watch.php?v=%s' % video_id
req = sanitized_Request(url)
- req.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(req, video_id)
-
- video_title = self._html_search_regex(
- r'<p class="title">([^<]+)', webpage, 'title')
- video_uploader = self._html_search_regex(
- [r"var\s+contentOwnerId\s*=\s*'([^']+)",
- r'By:\s*<a href="/community/profile\.php\?user=([^"]+)'],
+ req.add_header('Cookie', 'age_verified=1; cookiesAccepted=1')
+ webpage = self._download_webpage(req, display_id)
+
+ flashvars = self._parse_json(
+ self._search_regex(
+ r'xt\.playerOps\s*=\s*({.+?});', webpage, 'player ops'),
+ video_id)['flashvars']
+
+ title = flashvars.get('title') or self._search_regex(
+ r'<h1>([^<]+)</h1>', webpage, 'title')
+ video_url = compat_urllib_parse_unquote(flashvars['video_url'])
+ duration = int_or_none(flashvars.get('video_duration'))
+
+ uploader = self._search_regex(
+ r'<input[^>]+name="contentOwnerId"[^>]+value="([^"]+)"',
webpage, 'uploader', fatal=False)
- video_description = self._html_search_regex(
- r'<p class="fieldsDesc">([^<]+)',
- webpage, 'description', fatal=False)
- duration = parse_duration(self._html_search_regex(
- r'<span class="bold">Runtime:</span> ([^<]+)</p>',
- webpage, 'duration', fatal=False))
- view_count = str_to_int(self._html_search_regex(
- r'<span class="bold">Views:</span> ([\d,\.]+)</p>',
+ description = self._search_regex(
+ r'</h1>\s*<p>([^<]+)', webpage, 'description', fatal=False)
+ view_count = str_to_int(self._search_regex(
+ r'<dt>Views:</dt>\s*<dd>([\d,\.]+)</dd>',
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
- r'<div id="commentBar">([\d,\.]+) Comments</div>',
+ r'>Comments? \(([\d,\.]+)\)<',
webpage, 'comment count', fatal=False))
- formats = []
- for format_id, video_url in re.findall(
- r'flashvars\.quality_(.+?)\s*=\s*"([^"]+)"', webpage):
- fmt = {
- 'url': compat_urllib_parse_unquote(video_url),
- 'format_id': format_id,
- }
- m = re.search(r'^(?P<height>\d+)[pP]', format_id)
- if m:
- fmt['height'] = int(m.group('height'))
- formats.append(fmt)
-
- if not formats:
- video_url = compat_urllib_parse_unquote(self._search_regex(
- r'flashvars\.video_url\s*=\s*"([^"]+)"',
- webpage, 'video URL'))
- formats.append({'url': video_url})
-
- self._sort_formats(formats)
-
return {
'id': video_id,
- 'title': video_title,
- 'uploader': video_uploader,
- 'description': video_description,
+ 'display_id': display_id,
+ 'url': video_url,
+ 'title': title,
+ 'description': description,
+ 'uploader': uploader,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
- 'formats': formats,
'age_limit': 18,
}
class XTubeUserIE(InfoExtractor):
IE_DESC = 'XTube user profile'
- _VALID_URL = r'https?://(?:www\.)?xtube\.com/community/profile\.php\?(.*?)user=(?P<username>[^&#]+)(?:$|[&#])'
+ _VALID_URL = r'https?://(?:www\.)?xtube\.com/profile/(?P<id>[^/]+-\d+)'
_TEST = {
- 'url': 'http://www.xtube.com/community/profile.php?user=greenshowers',
+ 'url': 'http://www.xtube.com/profile/greenshowers-4056496',
'info_dict': {
- 'id': 'greenshowers',
+ 'id': 'greenshowers-4056496',
'age_limit': 18,
},
'playlist_mincount': 155,
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- username = mobj.group('username')
-
- profile_page = self._download_webpage(
- url, username, note='Retrieving profile page')
-
- video_count = int(self._search_regex(
- r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
- 'video count'))
-
- PAGE_SIZE = 25
- urls = []
- page_count = (video_count + PAGE_SIZE + 1) // PAGE_SIZE
- for n in range(1, page_count + 1):
- lpage_url = 'http://www.xtube.com/user_videos.php?page=%d&u=%s' % (n, username)
- lpage = self._download_webpage(
- lpage_url, username,
- note='Downloading page %d/%d' % (n, page_count))
- urls.extend(
- re.findall(r'addthis:url="([^"]+)"', lpage))
-
- return {
- '_type': 'playlist',
- 'id': username,
- 'age_limit': 18,
- 'entries': [{
- '_type': 'url',
- 'url': eurl,
- 'ie_key': 'XTube',
- } for eurl in urls]
- }
+ user_id = self._match_id(url)
+
+ entries = []
+ for pagenum in itertools.count(1):
+ request = sanitized_Request(
+ 'http://www.xtube.com/profile/%s/videos/%d' % (user_id, pagenum),
+ headers={
+ 'Cookie': 'popunder=4',
+ 'X-Requested-With': 'XMLHttpRequest',
+ 'Referer': url,
+ })
+
+ page = self._download_json(
+ request, user_id, 'Downloading videos JSON page %d' % pagenum)
+
+ html = page.get('html')
+ if not html:
+ break
+
+ for video_id in orderedSet([video_id for _, video_id in re.findall(
+ r'data-plid=(["\'])(.+?)\1', html)]):
+ entries.append(self.url_result('xtube:%s' % video_id, XTubeIE.ie_key()))
+
+ page_count = int_or_none(page.get('pageCount'))
+ if not page_count or pagenum == page_count:
+ break
+
+ playlist = self.playlist_result(entries, user_id)
+ playlist['age_limit'] = 18
+ return playlist
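
Each JSON page of the profile can repeat data-plid values, so the pager dedupes them with orderedSet before queuing url results. A sketch of the order-preserving dedupe, assuming youtube-dl's semantics:

    def orderedSet(iterable):
        res = []
        for el in iterable:
            if el not in res:  # keep first occurrence, preserve order
                res.append(el)
        return res

    print(orderedSet(['a1', 'b2', 'a1', 'c3']))  # ['a1', 'b2', 'c3']
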
},
}, {
# Video with only one format
- 'url': 'http://vlog.xuite.net/play/TkRZNjhULTM0NDE2MjkuZmx2',
- 'md5': 'c45737fc8ac5dc8ac2f92ecbcecf505e',
+ 'url': 'http://vlog.xuite.net/play/WUxxR2xCLTI1OTI1MDk5LmZsdg==',
+ 'md5': '21f7b39c009b5a4615b4463df6eb7a46',
'info_dict': {
- 'id': '3441629',
+ 'id': '25925099',
'ext': 'mp4',
- 'title': '孫燕姿 - 眼淚成詩',
+ 'title': 'BigBuckBunny_320x180',
'thumbnail': 're:^https?://.*\.jpg$',
- 'duration': 217.399,
- 'timestamp': 1299383640,
- 'upload_date': '20110306',
- 'uploader': 'Valen',
- 'uploader_id': '10400126',
- 'categories': ['影視娛樂'],
+ 'duration': 596.458,
+ 'timestamp': 1454242500,
+ 'upload_date': '20160131',
+ 'uploader': 'yan12125',
+ 'uploader_id': '12158353',
+ 'categories': ['個人短片'],
+ 'description': 'http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4',
},
}, {
# Video with two formats
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
- }
+ }, {
+ # it uses an alias to get the video_id
+ 'url': 'https://www.yahoo.com/movies/the-stars-of-daddys-home-have-very-different-212843197.html',
+ 'info_dict': {
+ 'id': '40eda9c8-8e5f-3552-8745-830f67d0c737',
+ 'ext': 'mp4',
+ 'title': 'Will Ferrell & Mark Wahlberg Are Pro-Spanking',
+ 'description': 'While they play feuding fathers in \'Daddy\'s Home,\' star Will Ferrell & Mark Wahlberg share their true feelings on parenthood.',
+ },
+ },
]
def _real_extract(self, url):
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
- CONTENT_ID_REGEXES = [
- r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
- r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
- r'"first_videoid"\s*:\s*"([^"]+)"',
- r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
- ]
- video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
+ alias = self._search_regex(
+ r'"aliases":{"video":"(.*?)"', webpage, 'alias', default=None)
+ if alias is not None:
+ alias_info = self._download_json(
+ 'https://www.yahoo.com/_td/api/resource/VideoService.videos;video_aliases=["%s"]' % alias,
+ display_id, 'Downloading alias info')
+ video_id = alias_info[0]['id']
+ else:
+ CONTENT_ID_REGEXES = [
+ r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
+ r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
+ r'"first_videoid"\s*:\s*"([^"]+)"',
+ r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
+ r'<article[^>]data-uuid=["\']([^"\']+)',
+ r'yahoo://article/view\?.*\buuid=([^&"\']+)',
+ ]
+ video_id = self._search_regex(
+ CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
'info_dict': {
'id': '2189178',
'ext': 'flv',
- "title": "Zeichentrick 1",
- "age_limit": 18,
+ 'title': 'Zeichentrick 1',
+ 'age_limit': 18,
}
}
from __future__ import unicode_literals
import base64
+import random
+import string
+import time
from .common import InfoExtractor
from ..compat import (
'''
_TESTS = [{
+ # MD5 is unstable
'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
- 'md5': '5f3af4192eabacc4501508d54a8cabd7',
'info_dict': {
'id': 'XMTc1ODE5Njcy_part1',
'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
'title': '武媚娘传奇 85',
},
'playlist_count': 11,
+ 'skip': 'Available in China only',
}, {
'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
'info_dict': {
'title': '花千骨 04',
},
'playlist_count': 13,
- 'skip': 'Available in China only',
}, {
'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
'note': 'Video protected with password',
},
}]
- def construct_video_urls(self, data1, data2):
+ def construct_video_urls(self, data):
# get sid, token
def yk_t(s1, s2):
ls = list(range(256))
return bytes(s)
sid, token = yk_t(
- b'becaf9be', base64.b64decode(data2['ep'].encode('ascii'))
+ b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii'))
).decode('ascii').split('_')
# get oip
- oip = data2['ip']
-
- # get fileid
- string_ls = list(
- 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890')
- shuffled_string_ls = []
- seed = data1['seed']
- N = len(string_ls)
- for ii in range(N):
- seed = (seed * 0xd3 + 0x754f) % 0x10000
- idx = seed * len(string_ls) // 0x10000
- shuffled_string_ls.append(string_ls[idx])
- del string_ls[idx]
+ oip = data['security']['ip']
fileid_dict = {}
- for format in data1['streamtypes']:
- streamfileid = [
- int(i) for i in data1['streamfileids'][format].strip('*').split('*')]
- fileid = ''.join(
- [shuffled_string_ls[i] for i in streamfileid])
- fileid_dict[format] = fileid[:8] + '%s' + fileid[10:]
+ for stream in data['stream']:
+ format = stream.get('stream_type')
+ fileid = stream['stream_fileid']
+ fileid_dict[format] = fileid
def get_fileid(format, n):
- fileid = fileid_dict[format] % hex(int(n))[2:].upper().zfill(2)
+ number = hex(int(str(n), 10))[2:].upper()
+ if len(number) == 1:
+ number = '0' + number
+ streamfileids = fileid_dict[format]
+ fileid = streamfileids[0:8] + number + streamfileids[10:]
return fileid
# get ep
# generate video_urls
video_urls_dict = {}
- for format in data1['streamtypes']:
+ for stream in data['stream']:
+ format = stream.get('stream_type')
video_urls = []
- for dt in data1['segs'][format]:
- n = str(int(dt['no']))
+ for dt in stream['segs']:
+ n = str(stream['segs'].index(dt))
param = {
- 'K': dt['k'],
+ 'K': dt['key'],
'hd': self.get_hd(format),
'myp': 0,
- 'ts': dt['seconds'],
'ypp': 0,
'ctype': 12,
'ev': 1,
video_url = \
'http://k.youku.com/player/getFlvPath/' + \
'sid/' + sid + \
- '_' + str(int(n) + 1).zfill(2) + \
+ '_00' + \
'/st/' + self.parse_ext_l(format) + \
'/fileid/' + get_fileid(format, n) + '?' + \
compat_urllib_parse.urlencode(param)
return video_urls_dict
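
get_fileid splices the two-digit hex segment number into positions 8-9 of the stream fileid. A worked run with a hypothetical fileid:

    streamfileid = 'ABCDEFGH00IJKLMNOPQR'  # hypothetical; chars 8-9 carry the segment number
    for n in ('0', '1', '15'):
        number = hex(int(n))[2:].upper()
        if len(number) == 1:
            number = '0' + number
        print(streamfileid[0:8] + number + streamfileid[10:])
    # ABCDEFGH00IJKLMNOPQR / ABCDEFGH01IJKLMNOPQR / ABCDEFGH0FIJKLMNOPQR
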
+ @staticmethod
+ def get_ysuid():
+ return '%d%s' % (int(time.time()), ''.join([
+ random.choice(string.ascii_letters) for i in range(3)]))
+
def get_hd(self, fm):
hd_id_dict = {
+ '3gp': '0',
+ '3gphd': '1',
'flv': '0',
+ 'flvhd': '0',
'mp4': '1',
+ 'mp4hd': '1',
+ 'mp4hd2': '1',
+ 'mp4hd3': '1',
'hd2': '2',
'hd3': '3',
- '3gp': '0',
- '3gphd': '1'
}
return hd_id_dict[fm]
def parse_ext_l(self, fm):
ext_dict = {
+ '3gp': 'flv',
+ '3gphd': 'mp4',
'flv': 'flv',
+ 'flvhd': 'flv',
'mp4': 'mp4',
+ 'mp4hd': 'mp4',
+ 'mp4hd2': 'flv',
+ 'mp4hd3': 'flv',
'hd2': 'flv',
'hd3': 'flv',
- '3gp': 'flv',
- '3gphd': 'mp4'
}
return ext_dict[fm]
'3gp': 'h6',
'3gphd': 'h5',
'flv': 'h4',
+ 'flvhd': 'h4',
'mp4': 'h3',
+ 'mp4hd': 'h3',
+ 'mp4hd2': 'h4',
+ 'mp4hd3': 'h4',
'hd2': 'h2',
- 'hd3': 'h1'
+ 'hd3': 'h1',
}
return _dict[fm]
def _real_extract(self, url):
video_id = self._match_id(url)
+ self._set_cookie('youku.com', '__ysuid', self.get_ysuid())
+
def retrieve_data(req_url, note):
- req = sanitized_Request(req_url)
+ headers = {
+ 'Referer': req_url,
+ }
+ self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
+ req = sanitized_Request(req_url, headers=headers)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
req.add_header('Ytdl-request-proxy', cn_verification_proxy)
raw_data = self._download_json(req, video_id, note=note)
- return raw_data['data'][0]
- video_password = self._downloader.params.get('videopassword', None)
+ return raw_data['data']
+
+ video_password = self._downloader.params.get('videopassword')
# request basic data
- basic_data_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
+ basic_data_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % video_id
if video_password:
- basic_data_url += '?password=%s' % video_password
-
- data1 = retrieve_data(
- basic_data_url,
- 'Downloading JSON metadata 1')
- data2 = retrieve_data(
- 'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id,
- 'Downloading JSON metadata 2')
-
- error_code = data1.get('error_code')
- if error_code:
- error = data1.get('error')
- if error is not None and '因版权原因无法观看此视频' in error:
+ basic_data_url += '&pwd=%s' % video_password
+
+ data = retrieve_data(basic_data_url, 'Downloading JSON metadata')
+
+ error = data.get('error')
+ if error:
+ error_note = error.get('note')
+ if error_note is not None and '因版权原因无法观看此视频' in error_note:
raise ExtractorError(
'Youku said: Sorry, this video is available in China only', expected=True)
+ elif error_note and '该视频被设为私密' in error_note:
+ raise ExtractorError(
+ 'Youku said: Sorry, this video is private', expected=True)
else:
- msg = 'Youku server reported error %i' % error_code
- if error is not None:
- msg += ': ' + error
+ msg = 'Youku server reported error %i' % error.get('code')
+ if error_note is not None:
+ msg += ': ' + error_note
raise ExtractorError(msg)
- title = data1['title']
+ # get video title
+ title = data['video']['title']
# generate video_urls_dict
- video_urls_dict = self.construct_video_urls(data1, data2)
+ video_urls_dict = self.construct_video_urls(data)
# construct info
entries = [{
'formats': [],
            # some formats are not available for all parts; we have to detect
            # which one has all of them
- } for i in range(max(len(v) for v in data1['segs'].values()))]
- for fm in data1['streamtypes']:
+ } for i in range(max(len(v.get('segs')) for v in data['stream']))]
+ for stream in data['stream']:
+ fm = stream.get('stream_type')
video_urls = video_urls_dict[fm]
- for video_url, seg, entry in zip(video_urls, data1['segs'][fm], entries):
+ for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
entry['formats'].append({
'url': video_url,
'format_id': self.get_format_name(fm),
formats.append(f)
self._sort_formats(formats)
- description = self._html_search_regex(
- r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>',
- webpage, 'description', default=None)
+ description = self._og_search_description(webpage, default=None)
thumbnail = self._search_regex(
r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
webpage, 'thumbnail', fatal=False, group='thumbnail')
uploader = self._html_search_regex(
- r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
+ r'(?s)<div[^>]+class=["\']videoInfoBy(?:\s+[^"\']+)?["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
webpage, 'uploader', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>',
from ..utils import (
clean_html,
encode_dict,
+ error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
+ mimetype2ext,
orderedSet,
parse_duration,
+ remove_quotes,
remove_start,
sanitized_Request,
smuggle_url,
return
-class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
+class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
# Extract entries from page with "Load more" button
def _entries(self, page, playlist_id):
more_widget_html = content_html = page
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
def _process_page(self, content):
- for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content):
+ for playlist_id in orderedSet(re.findall(r'href="/?playlist\?list=([0-9A-Za-z-_]{10,})"', content)):
yield self.url_result(
'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
+ (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY)
v=
)
))
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
- '5': {'ext': 'flv', 'width': 400, 'height': 240},
- '6': {'ext': 'flv', 'width': 450, 'height': 270},
- '13': {'ext': '3gp'},
- '17': {'ext': '3gp', 'width': 176, 'height': 144},
- '18': {'ext': 'mp4', 'width': 640, 'height': 360},
- '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
- '34': {'ext': 'flv', 'width': 640, 'height': 360},
- '35': {'ext': 'flv', 'width': 854, 'height': 480},
- '36': {'ext': '3gp', 'width': 320, 'height': 240},
- '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
- '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
- '43': {'ext': 'webm', 'width': 640, 'height': 360},
- '44': {'ext': 'webm', 'width': 854, 'height': 480},
- '45': {'ext': 'webm', 'width': 1280, 'height': 720},
- '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
- '59': {'ext': 'mp4', 'width': 854, 'height': 480},
- '78': {'ext': 'mp4', 'width': 854, 'height': 480},
-
-
- # 3d videos
- '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
- '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
- '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
- '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
- '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
- '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
- '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
+ '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
+ '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
+ '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
+ '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
+ '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
+ '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
+ '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
+ '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
+ # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
+ '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
+ '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
+ '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
+ '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
+ '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
+ '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
+ '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
+ '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
+ '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
+
+
+ # 3D videos
+ '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
+ '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
+ '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
+ '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
+ '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
+ '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
+ '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
# Apple HTTP Live Streaming
- '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
- '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
- '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
- '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
- '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
- '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
- '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
+ '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
+ '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
+ '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
+ '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
+ '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
+ '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
+ '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
# DASH mp4 video
- '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
- '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
- '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
- '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
+ '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
+ '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
+ '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
+ '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
+ '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
# Dash mp4 audio
- '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
- '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
- '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
+ '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
+ '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
+ '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
- '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
- '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
- '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
- '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
- '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
- '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
- '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
+ '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
+ '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
+ '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
+ '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
+ '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
+ '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
# Dash webm audio
- '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
- '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
+ '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
+ '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
- '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
- '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
- '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
+ '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
+ '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
+ '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
+ _SUBTITLE_FORMATS = ('ttml', 'vtt')
IE_NAME = 'youtube'
_TESTS = [
{
- 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
+ 'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:782e8651347686cba06e58f71ab51773',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
+ 'creator': 'Icona Pop',
}
},
{
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
+ 'alt_title': 'Tunnel Vision',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
+ 'creator': 'Justin Timberlake',
'age_limit': 18,
}
},
}
},
{
- 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
+ 'alt_title': 'Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
+ 'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
+ 'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
+ 'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
},
'params': {
'skip_download': 'requires avconv',
- }
+ },
+ 'skip': 'This live event has ended.',
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'skip_download': True,
},
},
+ {
+ # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
+ 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
+ 'info_dict': {
+ 'id': 'gVfLd0zydlo',
+ 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
+ },
+ 'playlist_count': 2,
+ },
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
},
{
# Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
+ # Also tests cut-off URL expansion in video description (see
+ # https://github.com/rg3/youtube-dl/issues/1892,
+ # https://github.com/rg3/youtube-dl/issues/8164)
'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
'info_dict': {
'id': 'lsguqyKfVQg',
'ext': 'mp4',
'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
+ 'alt_title': 'Dark Walk',
'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
'upload_date': '20151119',
'uploader_id': 'IronSoulElf',
'uploader': 'IronSoulElf',
+ 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
},
'params': {
'skip_download': True,
'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
'only_matching': True,
},
+ {
+ # Video with yt:stretch=17:0
+ 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
+ 'info_dict': {
+ 'id': 'Q39EVAstoRM',
+ 'ext': 'mp4',
+ 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
+ 'description': 'md5:ee18a25c350637c8faff806845bddee9',
+ 'upload_date': '20151107',
+ 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
+ 'uploader': 'CH GAMER DROID',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
+ 'only_matching': True,
+ }
]
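
The _VALID_URL delimiter change from (?:.*?&)?? to (?:.*?[&;])?? (mirrored in the playlist regex below) is what makes the last test above pass: in an HTML-escaped URL, the &amp; leaves a literal ';' right before v=. A simplified illustration, not the full _VALID_URL:

    import re

    url = 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY'
    print(re.search(r'[?&;]v=([0-9A-Za-z_-]{11})', url).group(1))  # V36LpHqtcDY
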
def __init__(self, *args, **kwargs):
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
if lang in sub_lang_list:
continue
sub_formats = []
- for ext in ['sbv', 'vtt', 'srt']:
+ for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
try:
args = player_config['args']
caption_url = args['ttsurl']
+ if not caption_url:
+ self._downloader.report_warning(err_msg)
+ return {}
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
- for ext in ['sbv', 'vtt', 'srt']:
+ for ext in self._SUBTITLE_FORMATS:
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
- def _parse_dash_manifest(
- self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
- def decrypt_sig(mobj):
- s = mobj.group(1)
- dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
- return '/signature/%s' % dec_s
- dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
- dash_doc = self._download_xml(
- dash_manifest_url, video_id,
- note='Downloading DASH manifest',
- errnote='Could not download DASH manifest',
- fatal=fatal)
-
- if dash_doc is False:
- return []
-
- formats = []
- for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
- mime_type = a.attrib.get('mimeType')
- for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
- url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
- if url_el is None:
- continue
- if mime_type == 'text/vtt':
- # TODO implement WebVTT downloading
- pass
- elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
- segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
- format_id = r.attrib['id']
- video_url = url_el.text
- filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
- f = {
- 'format_id': format_id,
- 'url': video_url,
- 'width': int_or_none(r.attrib.get('width')),
- 'height': int_or_none(r.attrib.get('height')),
- 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
- 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
- 'filesize': filesize,
- 'fps': int_or_none(r.attrib.get('frameRate')),
- }
- if segment_list is not None:
- f.update({
- 'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
- 'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
- 'protocol': 'http_dash_segments',
- })
- try:
- existing_format = next(
- fo for fo in formats
- if fo['format_id'] == format_id)
- except StopIteration:
- full_info = self._formats.get(format_id, {}).copy()
- full_info.update(f)
- codecs = r.attrib.get('codecs')
- if codecs:
- if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
- full_info['vcodec'] = codecs
- elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
- full_info['acodec'] = codecs
- formats.append(full_info)
- else:
- existing_format.update(f)
- else:
- self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
- return formats
-
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
- title="([^"]+)"\s+
+ (?:title|href)="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
- class="yt-uix-redirect-link"\s*>
- [^<]+
+ class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
+ [^<]+\.{3}\s*
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
- multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
+ multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
for feed in multifeed_metadata_list.split(','):
- feed_data = compat_parse_qs(feed)
+            # Unquoting must happen after the split on comma (,) since the
+            # unquoted textual fields may themselves contain commas (see
+ # https://github.com/rg3/youtube-dl/issues/8536)
+ feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
+ m_music = re.search(
+ r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
+ video_webpage)
+ if m_music:
+ video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
+ video_creator = clean_html(m_music.group('creator'))
+ else:
+ video_alt_title = video_creator = None
+
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if 'ratebypass' not in url:
url += '&ratebypass=yes'
+ dct = {
+ 'format_id': format_id,
+ 'url': url,
+ 'player_url': player_url,
+ }
+ if format_id in self._formats:
+ dct.update(self._formats[format_id])
+
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
- dct = {
- 'format_id': format_id,
- 'url': url,
- 'player_url': player_url,
+
+ more_fields = {
'filesize': int_or_none(url_data.get('clen', [None])[0]),
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
}
+ for key, value in more_fields.items():
+ if value:
+ dct[key] = value
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
- kind, ext = kind_ext
- dct['ext'] = ext
+ kind, _ = kind_ext
+ dct['ext'] = mimetype2ext(type_split[0])
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
if codecs:
codecs = codecs.split(',')
if len(codecs) == 2:
- acodec, vcodec = codecs[0], codecs[1]
+ acodec, vcodec = codecs[1], codecs[0]
else:
acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
dct.update({
'acodec': acodec,
'vcodec': vcodec,
})
- if format_id in self._formats:
- dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
+ # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
+ for a_format in formats:
+ a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
else:
+ unavailable_message = self._html_search_regex(
+ r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
+ video_webpage, 'unavailable message', default=None)
+ if unavailable_message:
+ raise ExtractorError(unavailable_message, expected=True)
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
- for dash_manifest_url in dash_mpds:
+ for mpd_url in dash_mpds:
dash_formats = {}
try:
- for df in self._parse_dash_manifest(
- video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
+ def decrypt_sig(mobj):
+ s = mobj.group(1)
+ dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
+ return '/signature/%s' % dec_s
+
+ mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
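+ # e.g. a manifest URL containing '/s/AB12.CD34/' becomes
+ # '/signature/<decrypted sig>/' before the MPD is fetched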
+
+ for df in self._extract_mpd_formats(
+ mpd_url, video_id, fatal=dash_mpd_fatal,
+ formats_dict=self._formats):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
- ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
- for f in formats:
- if f.get('vcodec') != 'none':
- f['stretched_ratio'] = ratio
+ w = float(stretched_m.group('w'))
+ h = float(stretched_m.group('h'))
+ # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM the ratio is 17:0).
+ # Only valid ratios are processed.
+ if w > 0 and h > 0:
+ ratio = w / h
+ for f in formats:
+ if f.get('vcodec') != 'none':
+ f['stretched_ratio'] = ratio
self._sort_formats(formats)
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
+ 'creator': video_creator,
'title': video_title,
+ 'alt_title': video_alt_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
}
-class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
+class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
- \? (?:.*?&)*? (?:p|a|list)=
+ \? (?:.*?[&;])*? (?:p|a|list)=
| p/
)
(
return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
- def _real_extract(self, url):
- # Extract playlist id
- mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError('Invalid URL: %s' % url)
- playlist_id = mobj.group(1) or mobj.group(2)
-
+ def _check_download_just_video(self, url, playlist_id):
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+ def _real_extract(self, url):
+ # Extract playlist id
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError('Invalid URL: %s' % url)
+ playlist_id = mobj.group(1) or mobj.group(2)
+
+ video = self._check_download_just_video(url, playlist_id)
+ if video:
+ return video
+
if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
},
}]
+ @classmethod
+ def suitable(cls, url):
+ return False if YoutubePlaylistsIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url)
+
def _real_extract(self, url):
channel_id = self._match_id(url)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
- _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
+ _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
IE_NAME = 'youtube:user'
return super(YoutubeUserIE, cls).suitable(url)
-class YoutubeUserPlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
- IE_DESC = 'YouTube.com user playlists'
- _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/user/(?P<id>[^/]+)/playlists'
- IE_NAME = 'youtube:user:playlists'
+class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
+ IE_DESC = 'YouTube.com user/channel playlists'
+ _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
+ IE_NAME = 'youtube:playlists'
_TESTS = [{
'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
'id': 'igorkle1',
'title': 'Игорь Клейнер',
},
+ }, {
+ 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
+ 'playlist_mincount': 17,
+ 'info_dict': {
+ 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
+ 'title': 'Chem Player',
+ },
}]
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
- _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
+ _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
+ }, {
+ 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
+ 'only_matching': True,
}]
def _real_extract(self, url):
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
- _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'
+ _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
- _TESTS = [] # override PlaylistIE tests
+ _TESTS = [{
+ 'url': 'https://www.youtube.com/playlist?list=WL',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
+ video = self._check_download_just_video(url, 'WL')
+ if video:
+ return video
return self._extract_playlist('WL')
unified_strdate,
OnDemandPagedList,
xpath_text,
+ determine_ext,
+ qualities,
+ float_or_none,
+ ExtractorError,
)
-def extract_from_xml_url(ie, video_id, xml_url):
- doc = ie._download_xml(
- xml_url, video_id,
- note='Downloading video info',
- errnote='Failed to download video info')
-
- title = doc.find('.//information/title').text
- description = xpath_text(doc, './/information/detail', 'description')
- duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
- uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
- uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
- upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
-
- def xml_to_format(fnode):
- video_url = fnode.find('url').text
- is_available = 'http://www.metafilegenerator' not in video_url
-
- format_id = fnode.attrib['basetype']
- format_m = re.match(r'''(?x)
- (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
- (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
- ''', format_id)
-
- ext = format_m.group('container')
- proto = format_m.group('proto').lower()
-
- quality = xpath_text(fnode, './quality', 'quality')
- abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
- vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
-
- width = int_or_none(xpath_text(fnode, './width', 'width'))
- height = int_or_none(xpath_text(fnode, './height', 'height'))
-
- filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
-
- format_note = ''
- if not format_note:
- format_note = None
-
- return {
- 'format_id': format_id + '-' + quality,
- 'url': video_url,
- 'ext': ext,
- 'acodec': format_m.group('acodec'),
- 'vcodec': format_m.group('vcodec'),
- 'abr': abr,
- 'vbr': vbr,
- 'width': width,
- 'height': height,
- 'filesize': filesize,
- 'format_note': format_note,
- 'protocol': proto,
- '_available': is_available,
- }
-
- def xml_to_thumbnails(fnode):
- thumbnails = []
- for node in fnode:
- thumbnail_url = node.text
- if not thumbnail_url:
- continue
- thumbnail = {
- 'url': thumbnail_url,
- }
- if 'key' in node.attrib:
- m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
- if m:
- thumbnail['width'] = int(m.group(1))
- thumbnail['height'] = int(m.group(2))
- thumbnails.append(thumbnail)
- return thumbnails
-
- thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
-
- format_nodes = doc.findall('.//formitaeten/formitaet')
- formats = list(filter(
- lambda f: f['_available'],
- map(xml_to_format, format_nodes)))
- ie._sort_formats(formats)
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'thumbnails': thumbnails,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'upload_date': upload_date,
- 'formats': formats,
- }
-
-
class ZDFIE(InfoExtractor):
_VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
'info_dict': {
'id': '2037704',
'upload_date': '20131127',
},
'skip': 'Videos on ZDF.de are depublicised in short order',
- }
+ }]
+
+ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
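+        # the SMIL documents handled here look roughly like this
+        # (assumed shape, namespaces elided):
+        #   <paramGroup xml:id="gl-vod">
+        #     <param name="host" value="..."/>
+        #     <param name="app" value="..."/>
+        #     <param name="protocols" value="rtmp,rtmpt"/>
+        #   </paramGroup>
+        #   ...
+        #   <video src="mp4:..." system-bitrate="2000000" paramGroup="gl-vod"/>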
+ param_groups = {}
+ for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
+ group_id = param_group.attrib.get(self._xpath_ns('id', 'http://www.w3.org/XML/1998/namespace'))
+ params = {}
+ for param in param_group:
+ params[param.get('name')] = param.get('value')
+ param_groups[group_id] = params
+
+ formats = []
+ for video in smil.findall(self._xpath_ns('.//video', namespace)):
+ src = video.get('src')
+ if not src:
+ continue
+ bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+ group_id = video.get('paramGroup')
+ param_group = param_groups[group_id]
+ for proto in param_group['protocols'].split(','):
+ formats.append({
+ 'url': '%s://%s' % (proto, param_group['host']),
+ 'app': param_group['app'],
+ 'play_path': src,
+ 'ext': 'flv',
+ 'format_id': '%s-%d' % (proto, bitrate),
+ 'tbr': bitrate,
+ })
+ self._sort_formats(formats)
+ return formats
+
+ def extract_from_xml_url(self, video_id, xml_url):
+ doc = self._download_xml(
+ xml_url, video_id,
+ note='Downloading video info',
+ errnote='Failed to download video info')
+
+ status_code = doc.find('./status/statuscode')
+ if status_code is not None and status_code.text != 'ok':
+ code = status_code.text
+ if code == 'notVisibleAnymore':
+ message = 'Video %s is not available' % video_id
+ else:
+ message = '%s returned error: %s' % (self.IE_NAME, code)
+ raise ExtractorError(message, expected=True)
+
+ title = doc.find('.//information/title').text
+ description = xpath_text(doc, './/information/detail', 'description')
+ duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
+ uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
+ uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
+ upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
+
+ def xml_to_thumbnails(fnode):
+ thumbnails = []
+ for node in fnode:
+ thumbnail_url = node.text
+ if not thumbnail_url:
+ continue
+ thumbnail = {
+ 'url': thumbnail_url,
+ }
+ if 'key' in node.attrib:
+ m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
+ if m:
+ thumbnail['width'] = int(m.group(1))
+ thumbnail['height'] = int(m.group(2))
+ thumbnails.append(thumbnail)
+ return thumbnails
+
+ thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
+
+ format_nodes = doc.findall('.//formitaeten/formitaet')
+ quality = qualities(['veryhigh', 'high', 'med', 'low'])
+
+ def get_quality(elem):
+ return quality(xpath_text(elem, 'quality'))
+ format_nodes.sort(key=get_quality)
+ format_ids = []
+ formats = []
+ for fnode in format_nodes:
+ video_url = fnode.find('url').text
+ is_available = 'http://www.metafilegenerator' not in video_url
+ if not is_available:
+ continue
+ format_id = fnode.attrib['basetype']
+ quality = xpath_text(fnode, './quality', 'quality')
+ format_m = re.match(r'''(?x)
+ (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
+ (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
+ ''', format_id)
+
+ ext = determine_ext(video_url, None) or format_m.group('container')
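+            # streaming manifests (smil/f4m/m3u8) expand into several
+            # formats of their own below, so they keep the bare basetype
+            # id and skip the per-quality dedup applied to direct links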
+ if ext not in ('smil', 'f4m', 'm3u8'):
+ format_id = format_id + '-' + quality
+ if format_id in format_ids:
+ continue
+
+ if ext == 'meta':
+ continue
+ elif ext == 'smil':
+ formats.extend(self._extract_smil_formats(
+ video_url, video_id, fatal=False))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
+ elif ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ video_url, video_id, f4m_id=format_id, fatal=False))
+ else:
+ proto = format_m.group('proto').lower()
+
+ abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
+ vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
+
+ width = int_or_none(xpath_text(fnode, './width', 'width'))
+ height = int_or_none(xpath_text(fnode, './height', 'height'))
+
+ filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
+
+                format_note = None
+
+ formats.append({
+ 'format_id': format_id,
+ 'url': video_url,
+ 'ext': ext,
+ 'acodec': format_m.group('acodec'),
+ 'vcodec': format_m.group('vcodec'),
+ 'abr': abr,
+ 'vbr': vbr,
+ 'width': width,
+ 'height': height,
+ 'filesize': filesize,
+ 'format_note': format_note,
+ 'protocol': proto,
+ '_available': is_available,
+ })
+ format_ids.append(format_id)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'thumbnails': thumbnails,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'upload_date': upload_date,
+ 'formats': formats,
+ }
def _real_extract(self, url):
video_id = self._match_id(url)
xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
- return extract_from_xml_url(self, video_id, xml_url)
+ return self.extract_from_xml_url(video_id, xml_url)
class ZDFChannelIE(InfoExtractor):
- _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)'
- _TEST = {
+ _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/(?:[^/]+/)?)(?P<id>[0-9]+)'
+ _TESTS = [{
'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
'info_dict': {
'id': '1586442',
},
'playlist_count': 3,
- }
+ }, {
+ 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/aktuellste/332',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/meist-gesehen/332',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/_/1798716?bc=nrt;nrm?flash=off',
+ 'only_matching': True,
+ }]
_PAGE_SIZE = 50
def _fetch_page(self, channel_id, page):
--- /dev/null
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ str_to_int,
+)
+
+
+class ZippCastIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?zippcast\.com/(?:video/|videoview\.php\?.*\bvplay=)(?P<id>[0-9a-zA-Z]+)'
+ _TESTS = [{
+ # m3u8, hq direct link
+ 'url': 'http://www.zippcast.com/video/c9cfd5c7e44dbc29c81',
+ 'md5': '5ea0263b5606866c4d6cda0fc5e8c6b6',
+ 'info_dict': {
+ 'id': 'c9cfd5c7e44dbc29c81',
+ 'ext': 'mp4',
+ 'title': '[Vinesauce] Vinny - Digital Space Traveler',
+ 'description': 'Muted on youtube, but now uploaded in it\'s original form.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'vinesauce',
+ 'view_count': int,
+ 'categories': ['Entertainment'],
+ 'tags': list,
+ },
+ }, {
+ # f4m, lq ipod direct link
+ 'url': 'http://www.zippcast.com/video/b79c0a233e9c6581775',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.zippcast.com/videoview.php?vplay=c9cfd5c7e44dbc29c81&auto=no',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(
+ 'http://www.zippcast.com/video/%s' % video_id, video_id)
+
+ formats = []
+ video_url = self._search_regex(
+ r'<source[^>]+src=(["\'])(?P<url>.+?)\1', webpage,
+ 'video url', default=None, group='url')
+ if video_url:
+ formats.append({
+ 'url': video_url,
+ 'format_id': 'http',
+ 'preference': 0, # direct link is almost always of worse quality
+ })
+ src_url = self._search_regex(
+ r'src\s*:\s*(?:escape\()?(["\'])(?P<url>http://.+?)\1',
+ webpage, 'src', default=None, group='url')
+ ext = determine_ext(src_url)
+ if ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ src_url, video_id, 'mp4', entry_protocol='m3u8_native',
+ m3u8_id='hls', fatal=False))
+ elif ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ src_url, video_id, f4m_id='hds', fatal=False))
+ self._sort_formats(formats)
+
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage) or self._html_search_meta(
+ 'description', webpage)
+ uploader = self._search_regex(
+ r'<a[^>]+href="https?://[^/]+/profile/[^>]+>([^<]+)</a>',
+ webpage, 'uploader', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
+ view_count = str_to_int(self._search_regex(
+ r'>([\d,.]+) views!', webpage, 'view count', fatal=False))
+
+ categories = re.findall(
+ r'<a[^>]+href="https?://[^/]+/categories/[^"]+">([^<]+),?<',
+ webpage)
+ tags = re.findall(
+ r'<a[^>]+href="https?://[^/]+/search/tags/[^"]+">([^<]+),?<',
+ webpage)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'view_count': view_count,
+ 'categories': categories,
+ 'tags': tags,
+ 'formats': formats,
+ }
def extract_function(self, funcname):
func_m = re.search(
r'''(?x)
- (?:function\s+%s|[{;]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
+ (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
\((?P<args>[^)]*)\)\s*
\{(?P<code>[^}]+)\}''' % (
re.escape(funcname), re.escape(funcname), re.escape(funcname)),
if option.takes_value():
opts.append(' %s' % option.metavar)
- return "".join(opts)
+ return ''.join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
- help='List all available formats of specified videos')
+ help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
'--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
- help='Languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
+ help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')
downloader = optparse.OptionGroup(parser, 'Download Options')
downloader.add_option(
'--hls-prefer-native',
dest='hls_prefer_native', action='store_true',
help='Use the native HLS downloader instead of ffmpeg (experimental)')
+ downloader.add_option(
+ '--hls-use-mpegts',
+ dest='hls_use_mpegts', action='store_true',
+ help='Use the mpegts container for HLS videos, allowing to play the '
+ 'video while downloading (some players may not be able to play it)')
downloader.add_option(
'--external-downloader',
dest='external_downloader', metavar='COMMAND',
metavar='CMD', dest='exec_cmd',
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
postproc.add_option(
- '--convert-subtitles', '--convert-subs',
+ '--convert-subs', '--convert-subtitles',
metavar='FORMAT', dest='convertsubtitles', default=None,
help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')
'Skipping embedding the thumbnail because the file is missing.')
return [], info
- if info['ext'] == 'mp3':
+ if info['ext'] in ('mp3', 'mkv'):
options = [
'-c', 'copy', '-map', '0', '-map', '1',
'-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment="Cover (Front)"']
cmd = cmd.replace('{}', shlex_quote(information['filepath']))
- self._downloader.to_screen("[exec] Executing command: %s" % cmd)
+ self._downloader.to_screen('[exec] Executing command: %s' % cmd)
retCode = subprocess.call(cmd, shell=True)
if retCode != 0:
raise PostProcessingError(
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
- prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
+ prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
+ prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
+ # https://github.com/rg3/youtube-dl/issues/8350
+ if info.get('protocol') == 'm3u8_native' or info.get('protocol') == 'm3u8' and self._downloader.params.get('hls_prefer_native', False):
+ options.extend(['-bsf:a', 'aac_adtstoasc'])
+
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
+ sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
'[ffmpeg] Subtitle file for %s is already in the '
'requested format' % new_ext)
continue
+ old_file = subtitles_filename(filename, lang, ext)
+ sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext == 'dfxp' or ext == 'ttml':
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
- dfxp_file = subtitles_filename(filename, lang, ext)
+ dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
+ old_file = srt_file
- ext = 'srt'
subs[lang] = {
'ext': 'srt',
'data': srt_data
if new_ext == 'srt':
continue
+ else:
+ sub_filenames.append(srt_file)
- self.run_ffmpeg(
- subtitles_filename(filename, lang, ext),
- new_file, ['-f', new_format])
+ self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
- 'ext': ext,
+ 'ext': new_ext,
'data': f.read(),
}
- return [], info
+ return sub_filenames, info
'(?P<title>.+)\ \-\ (?P<artist>.+)'
"""
lastpos = 0
- regex = ""
+ regex = ''
# replace %(..)s with regex group and escape other string parts
for match in re.finditer(r'%\((\w+)\)s', fmt):
regex += re.escape(fmt[lastpos:match.start()])
assert ':' not in key
assert os.path.exists(path)
- ads_fn = path + ":" + key
+ ads_fn = path + ':' + key
try:
- with open(ads_fn, "wb") as f:
+ with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
- user_has_setfattr = check_executable("setfattr", ['--version'])
- user_has_xattr = check_executable("xattr", ['-h'])
+ user_has_setfattr = check_executable('setfattr', ['--version'])
+ user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = info.get(infoname)
if value:
- if infoname == "upload_date":
+ if infoname == 'upload_date':
value = hyphenate_date(value)
byte_value = value.encode('utf-8')
elif mname in _builtin_classes:
res = _builtin_classes[mname]
else:
- # Assume unitialized
+ # Assume uninitialized
# TODO warn here
res = undefined
stack.append(res)
import sys
from zipimport import zipimporter
-from .compat import compat_str
+from .utils import encode_compat_str
from .version import __version__
def rsa_verify(message, signature, key):
- from struct import pack
from hashlib import sha256
-
assert isinstance(message, bytes)
- block_size = 0
- n = key[0]
- while n:
- block_size += 1
- n >>= 8
- signature = pow(int(signature, 16), key[1], key[0])
- raw_bytes = []
- while signature:
- raw_bytes.insert(0, pack("B", signature & 0xFF))
- signature >>= 8
- signature = (block_size - len(raw_bytes)) * b'\x00' + b''.join(raw_bytes)
- if signature[0:2] != b'\x00\x01':
- return False
- signature = signature[2:]
- if b'\x00' not in signature:
- return False
- signature = signature[signature.index(b'\x00') + 1:]
- if not signature.startswith(b'\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'):
- return False
- signature = signature[19:]
- if signature != sha256(message).digest():
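+    # PKCS#1 v1.5 verification done entirely on hex strings: apply the
+    # public exponent, left-pad to the modulus size and compare against
+    # 0001 ff..ff 00 followed by the DigestInfo(SHA-256) of the message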
+ byte_size = (len(bin(key[0])) - 2 + 8 - 1) // 8
+ signature = ('%x' % pow(int(signature, 16), key[1], key[0])).encode()
+ signature = (byte_size * 2 - len(signature)) * b'0' + signature
+ asn1 = b'3031300d060960864801650304020105000420'
+ asn1 += sha256(message).hexdigest().encode()
+ if byte_size < len(asn1) // 2 + 11:
return False
- return True
+ expected = b'0001' + (byte_size - len(asn1) // 2 - 3) * b'ff' + b'00' + asn1
+ return expected == signature
def update_self(to_screen, verbose, opener):
"""Update the program file with the latest version from the repository"""
- UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
+ UPDATE_URL = 'https://rg3.github.io/youtube-dl/update/'
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
- if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
+ if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
return
newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
except Exception:
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
versions_info = json.loads(versions_info)
except Exception:
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t obtain versions info. Please try again later.')
return
if 'signature' not in versions_info:
filename = sys.argv[0]
# Py2EXE: Filename could be different
- if hasattr(sys, "frozen") and not os.path.isfile(filename):
+ if hasattr(sys, 'frozen') and not os.path.isfile(filename):
if os.path.isfile(filename + '.exe'):
filename += '.exe'
return
# Py2EXE
- if hasattr(sys, "frozen"):
+ if hasattr(sys, 'frozen'):
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
urlh.close()
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
outf.write(newcontent)
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to write the new version')
return
return # Do not show premature success messages
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
urlh.close()
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
outf.write(newcontent)
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
from __future__ import unicode_literals
import base64
+import binascii
import calendar
import codecs
import contextlib
compiled_regex_type = type(re.compile(''))
std_headers = {
- 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/44.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
+KNOWN_EXTENSIONS = (
+ 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
+ 'flv', 'f4v', 'f4a', 'f4b',
+ 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
+ 'mkv', 'mka', 'mk3d',
+ 'avi', 'divx',
+ 'mov',
+ 'asf', 'wmv', 'wma',
+ '3gp', '3g2',
+ 'mp3',
+ 'flac',
+ 'ape',
+ 'wav',
+ 'f4f', 'f4m', 'm3u8', 'smil')
+
def preferredencoding():
"""Get preferred encoding.
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
- return get_element_by_attribute("id", id, html)
+ return get_element_by_attribute('id', id, html)
def get_element_by_attribute(attribute, value, html):
return hc
+def handle_youtubedl_headers(headers):
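+    # strip the internal marker header and, when it is present, the real
+    # Accept-Encoding header as well, e.g.
+    # handle_youtubedl_headers({'User-Agent': 'a', 'Accept-Encoding': 'gzip',
+    #                           'Youtubedl-no-compression': 'True'})
+    # returns {'User-Agent': 'a'}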
+ filtered_headers = headers
+
+ if 'Youtubedl-no-compression' in filtered_headers:
+ filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
+ del filtered_headers['Youtubedl-no-compression']
+
+ return filtered_headers
+
+
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
- to include the HTTP header "Youtubedl-No-Compression", which will be
+ to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
- if 'Youtubedl-no-compression' in req.headers:
- if 'Accept-encoding' in req.headers:
- del req.headers['Accept-encoding']
- del req.headers['Youtubedl-no-compression']
+
+ req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
raise original_ioerror
resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
+ del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
+ del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/rg3/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
- elif guess.rstrip('/') in (
- 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
- 'flv', 'f4v', 'f4a', 'f4b',
- 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
- 'mkv', 'mka', 'mk3d',
- 'avi', 'divx',
- 'mov',
- 'asf', 'wmv', 'wma',
- '3gp', '3g2',
- 'mp3',
- 'flac',
- 'ape',
- 'wav',
- 'f4f', 'f4m', 'm3u8', 'smil'):
+    # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
+ elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
if sign == '-':
time = -time
unit = match.group('unit')
- # A bad aproximation?
+ # A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
- return datetime.datetime.strptime(date_str, "%Y%m%d").date()
+ return datetime.datetime.strptime(date_str, '%Y%m%d').date()
def hyphenate_date(date_str):
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
- (b"GetStdHandle", ctypes.windll.kernel32))
+ (b'GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
- ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
+ ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
- GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
+ GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
- (b"GetConsoleMode", ctypes.windll.kernel32))
+ (b'GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if s is None:
return None
- # The lower-case forms are of course incorrect and inofficial,
+ # The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
def setproctitle(title):
assert isinstance(title, compat_str)
try:
- libc = ctypes.cdll.LoadLibrary("libc.so.6")
+ libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
title_bytes = title.encode('utf-8')
return s
+def remove_quotes(s):
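+    # strip one pair of matching surrounding quotes, e.g.
+    # remove_quotes('"abc"') -> 'abc' and remove_quotes("'a'") -> 'a';
+    # unmatched quotes are kept: remove_quotes('"a') -> '"a'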
+ if s is None or len(s) < 2:
+ return s
+ for quote in ('"', "'", ):
+ if s[0] == quote and s[-1] == quote:
+ return s[1:-1]
+ return s
+
+
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
- return "HEAD"
+ return 'HEAD'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
return dict((encode(k), encode(v)) for k, v in d.items())
+def dict_get(d, key_or_keys, default=None, skip_false_values=True):
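+    # return the first usable value among several candidate keys, e.g.
+    # dict_get({'a': '', 'b': 'x'}, ('a', 'b')) -> 'x'; empty/false values
+    # are skipped unless skip_false_values is False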
+ if isinstance(key_or_keys, (list, tuple)):
+ for key in key_or_keys:
+ if key not in d or d[key] is None or skip_false_values and not d[key]:
+ continue
+ return d[key]
+ return default
+ return d.get(key_or_keys, default)
+
+
+def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
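+    # decode byte strings with the given encoding; values that are
+    # already text (compat_str) pass through unchanged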
+ return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
+
+
US_RATINGS = {
'G': 0,
'PG': 10,
if s is None:
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
- return int(m.group('age')) if m else US_RATINGS.get(s, None)
+ return int(m.group('age')) if m else US_RATINGS.get(s)
def strip_jsonp(code):
return re.sub(
- r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
+ r'(?s)^[a-zA-Z0-9_.]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
return ' '.join(shlex_quote(a) for a in args)
+def error_to_compat_str(err):
+ err_str = str(err)
+    # On Python 2, error byte strings must be decoded with the proper
+    # encoding rather than ASCII
+ if sys.version_info[0] < 3:
+ err_str = err_str.decode(preferredencoding())
+ return err_str
+
+
def mimetype2ext(mt):
+ ext = {
+ 'audio/mp4': 'm4a',
+ }.get(mt)
+ if ext is not None:
+ return ext
+
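+    # full MIME types are special-cased above; otherwise map the bare
+    # subtype, e.g. mimetype2ext('video/x-flv') -> 'flv', falling back
+    # to the subtype itself for unknown types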
_, _, res = mt.rpartition('/')
return {
- 'x-ms-wmv': 'wmv',
- 'x-mp4-fragmented': 'mp4',
+ '3gpp': '3gp',
+ 'smptett+xml': 'tt',
+ 'srt': 'srt',
+ 'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
+ 'vtt': 'vtt',
+ 'x-flv': 'flv',
+ 'x-mp4-fragmented': 'mp4',
+ 'x-ms-wmv': 'wmv',
}.get(res, res)
def parse_dfxp_time_expr(time_expr):
if not time_expr:
- return 0.0
+ return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
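+    # also accept the frame-based 'HH:MM:SS:FF' notation by treating the
+    # trailing ':FF' as a decimal fraction, e.g. '00:01:02:03' -> 62.03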
- mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
+ mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
- return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
+ return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
})
- def parse_node(node):
- str_or_empty = functools.partial(str_or_none, default='')
+ class TTMLPElementParser(object):
+ out = ''
- out = str_or_empty(node.text)
+ def start(self, tag, attrib):
+ if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
+ self.out += '\n'
- for child in node:
- if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
- out += '\n' + str_or_empty(child.tail)
- elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
- out += str_or_empty(parse_node(child))
- else:
- out += str_or_empty(xml.etree.ElementTree.tostring(child))
+ def end(self, tag):
+ pass
- return out
+ def data(self, data):
+ self.out += data
+
+ def close(self):
+ return self.out.strip()
+
+ def parse_node(node):
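+            # e.g. <p>foo<br/>bar<span> baz</span></p> flattens to
+            # 'foo\nbar baz': only character data and <br/> newlines survive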
+ target = TTMLPElementParser()
+ parser = xml.etree.ElementTree.XMLParser(target=target)
+ parser.feed(xml.etree.ElementTree.tostring(node))
+ return parser.close()
dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
out = []
raise ValueError('Invalid dfxp/TTML subtitle')
for para, index in zip(paras, itertools.count(1)):
- begin_time = parse_dfxp_time_expr(para.attrib['begin'])
+ begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
+ dur = parse_dfxp_time_expr(para.attrib.get('dur'))
+ if begin_time is None:
+ continue
if not end_time:
- end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
+ if not dur:
+ continue
+ end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
return None # No Proxy
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
+
+
+def ohdave_rsa_encrypt(data, exponent, modulus):
+ '''
+ Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
+
+ Input:
+ data: data to encrypt, bytes-like object
+    exponent, modulus: parameters e and N of the RSA algorithm, both integers
+ Output: hex string of encrypted data
+
+    Limitation: supports single-block encryption only
+ '''
+
+ payload = int(binascii.hexlify(data[::-1]), 16)
+ encrypted = pow(payload, exponent, modulus)
+ return '%x' % encrypted
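+    # toy illustration (made-up parameters; real callers pass the site's
+    # 1024-bit key): ohdave_rsa_encrypt(b'\x02\x01', 17, 3233) reverses
+    # the bytes to the integer 0x0102 and returns '%x' % pow(0x0102, 17, 3233)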
from __future__ import unicode_literals
-__version__ = '2015.11.27.1'
+__version__ = '2016.02.22'