# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    update_url_query,
)


class NaverIE(InfoExtractor):
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/v/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'upload_date': '20130903',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '638ed4c12012c458fefcddfd01f173cd',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
            'upload_date': '20150519',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

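        # The watch page embeds an RMC player; its constructor call carries the
        # video id (vid) and the key needed to query the playback API.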
        m_id = re.search(r'var rmcPlayer = new nhn\.rmcnmv\.RMCVideoPlayer\("(.+?)", "(.+?)"',
                         webpage)
        if m_id is None:
            error = self._html_search_regex(
                r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
                webpage, 'error', default=None)
            if error:
                raise ExtractorError(error, expected=True)
            raise ExtractorError('couldn\'t extract vid and key')
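        # Fetch the stream metadata from the VOD playback API using the extracted vid and key.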
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + m_id.group(1),
            video_id, query={
                'key': m_id.group(2),
            })
        meta = video_data['meta']
        title = meta['subject']
        formats = []

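        # Helper: turn a list of stream entries into format dicts, appending any
        # extra query parameters (e.g. per-stream keys) to the stream URL.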
        def extract_formats(streams, stream_type, query={}):
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, encoding_option.get('id') or encoding_option.get('name')),
                    'url': stream_url,
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

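        # Progressive (H264) streams are listed under videos.list; additional
        # stream sets (e.g. HLS) follow, each carrying its own query parameters
        # under 'keys'.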
        extract_formats(video_data.get('videos', {}).get('list', []), 'H264')
        for stream_set in video_data.get('streams', []):
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
        self._sort_formats(formats)

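        # Subtitle tracks are exposed under captions.list, keyed by language (or locale).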
        subtitles = {}
        for caption in video_data.get('captions', {}).get('list', []):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            subtitles.setdefault(caption.get('language') or caption.get('locale'), []).append({
                'url': caption_url,
            })

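        # The upload date only appears on the watch page, as YYYY.MM.DD inside the date <span>.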
        upload_date = self._search_regex(
            r'<span[^>]+class="date".*?(\d{4}\.\d{2}\.\d{2})',
            webpage, 'upload date', fatal=False)
        if upload_date:
            upload_date = upload_date.replace('.', '')

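        # Description comes from the page's Open Graph tags; the thumbnail prefers
        # the API's cover image and falls back to Open Graph.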
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'description': self._og_search_description(webpage),
            'thumbnail': meta.get('cover', {}).get('source') or self._og_search_thumbnail(webpage),
            'view_count': int_or_none(meta.get('count')),
            'upload_date': upload_date,
        }