# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    update_url_query,
)


class NaverIE(InfoExtractor):
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/v/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'upload_date': '20130903',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '638ed4c12012c458fefcddfd01f173cd',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
            'upload_date': '20150519',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
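
        # The watch page embeds a videoId and an inKey value; both are
        # needed for the play.rmcnmv.naver.com playback request below.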
        vid = self._search_regex(
            r'videoId["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'video id', default=None, group='value')
        in_key = self._search_regex(
            r'inKey["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
            'key', default=None, group='value')
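
        # If either value is missing, the page is typically showing an error
        # box (e.g. for geo-restricted videos); surface that message instead.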
        if not vid or not in_key:
            error = self._html_search_regex(
                r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
                webpage, 'error', default=None)
            if error:
                raise ExtractorError(error, expected=True)
            raise ExtractorError('couldn\'t extract vid and key')
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
            video_id, query={
                'key': in_key,
            })
        meta = video_data['meta']
        title = meta['subject']

        formats = []
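
        # Build format dicts from a list of stream entries in the play API
        # response; `query` carries extra URL parameters, if any.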
        def extract_formats(streams, stream_type, query={}):
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, encoding_option.get('id') or encoding_option.get('name')),
                    'url': stream_url,
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

        extract_formats(video_data.get('videos', {}).get('list', []), 'H264')
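        # Some responses list additional stream sets (e.g. HLS) whose URLs
        # need extra query parameters supplied alongside them in 'keys'.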
        for stream_set in video_data.get('streams', []):
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
        self._sort_formats(formats)
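
        # Collect caption tracks, keyed by language, from the play API
        # response.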
        subtitles = {}
        for caption in video_data.get('captions', {}).get('list', []):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            subtitles.setdefault(caption.get('language') or caption.get('locale'), []).append({
                'url': caption_url,
            })
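
        # The upload date only appears on the watch page (as YYYY.MM.DD);
        # normalize it to the expected YYYYMMDD form.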
        upload_date = self._search_regex(
            r'<span[^>]+class="date".*?(\d{4}\.\d{2}\.\d{2})',
            webpage, 'upload date', fatal=False)
        if upload_date:
            upload_date = upload_date.replace('.', '')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'description': self._og_search_description(webpage),
            'thumbnail': meta.get('cover', {}).get('source') or self._og_search_thumbnail(webpage),
            'view_count': int_or_none(meta.get('count')),
            'upload_date': upload_date,
        }