- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
- canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
- webpage = self._download_webpage(canonical_url, video_id)
- full_id = self._search_regex(r'<link rel="video_src" href=".+?vid=(.+?)"',
- webpage, u'full id')
- query = compat_urllib_parse.urlencode({'vid': full_id})
- info_xml = self._download_webpage(
- 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
- u'Downloading video info')
- urls_xml = self._download_webpage(
- 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
- video_id, u'Downloading video formats info')
- info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8'))
- urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8'))
-
- self.to_screen(u'%s: Getting video urls' % video_id)
- formats = []
- for format_el in urls.findall('result/output_list/output_list'):
- profile = format_el.attrib['profile']
- format_query = compat_urllib_parse.urlencode({
- 'vid': full_id,
- 'profile': profile,
- })
- url_xml = self._download_webpage(
- 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
- video_id, note=False)
- url_doc = xml.etree.ElementTree.fromstring(url_xml.encode('utf-8'))
- format_url = url_doc.find('result/url').text
- formats.append({
- 'url': format_url,
- 'ext': determine_ext(format_url),
- 'format_id': profile,
- })
-
- info = {
- 'id': video_id,
- 'title': info.find('TITLE').text,
- 'formats': formats,
- 'thumbnail': self._og_search_thumbnail(webpage),
- 'description': info.find('CONTENTS').text,
- 'duration': int(info.find('DURATION').text),
- 'upload_date': info.find('REGDTTM').text[:8],
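+        # Daum serves its videos through Kakao TV, so resolve the video ID and
+        # delegate extraction to the Kakao extractor.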
+ video_id = compat_urllib_parse_unquote(self._match_id(url))
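+        # Kakao addresses non-numeric Daum IDs with an '@my' suffix.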
+ if not video_id.isdigit():
+ video_id += '@my'
+ return self.url_result(
+ self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id)
+
+
+class DaumClipIE(DaumBaseIE):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.(?:do|tv)|mypot/View.do)\?.*?clipid=(?P<id>\d+)'
+ IE_NAME = 'daum.net:clip'
+ _URL_TEMPLATE = 'http://tvpot.daum.net/clip/ClipView.do?clipid=%s'
+
+ _TESTS = [{
+ 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
+ 'info_dict': {
+ 'id': '52554690',
+ 'ext': 'mp4',
+ 'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
+ 'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
+ 'upload_date': '20130831',
+ 'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+ 'duration': 3868,
+ 'view_count': int,
+ 'uploader': 'GOMeXP',
+ 'uploader_id': 6667,
+ 'timestamp': 1377911092,
+ },
+ }, {
+ 'url': 'http://m.tvpot.daum.net/clip/ClipView.tv?clipid=54999425',
+ 'only_matching': True,
+ }]
+
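+    # Playlist and user pages can also carry a clipid parameter; let the list
+    # extractors claim those URLs so --no-playlist is handled in one place.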
+ @classmethod
+ def suitable(cls, url):
+ return False if DaumPlaylistIE.suitable(url) or DaumUserIE.suitable(url) else super(DaumClipIE, cls).suitable(url)
+
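+    # Clips resolve the same way as plain videos: the clip ID maps directly
+    # onto a Kakao cliplink ID.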
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ return self.url_result(
+ self._KAKAO_EMBED_BASE + video_id, 'Kakao', video_id)
+
+
+class DaumListIE(InfoExtractor):
+ def _get_entries(self, list_id, list_id_type):
+ name = None
+ entries = []
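+        # GetClipInfo.do returns up to 48 clips per page; keep paging until the
+        # response no longer reports has_more.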
+ for pagenum in itertools.count(1):
+ list_info = self._download_json(
+ 'http://tvpot.daum.net/mypot/json/GetClipInfo.do?size=48&init=true&order=date&page=%d&%s=%s' % (
+ pagenum, list_id_type, list_id), list_id, 'Downloading list info - %s' % pagenum)
+
+ entries.extend([
+ self.url_result(
+ 'http://tvpot.daum.net/v/%s' % clip['vid'])
+ for clip in list_info['clip_list']
+ ])
+
+ if not name:
+ name = list_info.get('playlist_bean', {}).get('name') or \
+ list_info.get('potInfo', {}).get('name')
+
+ if not list_info.get('has_more'):
+ break
+
+ return name, entries
+
+ def _check_clip(self, url, list_id):
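+        # When the list URL also names a single clip, honor --no-playlist by
+        # returning just that clip instead of the whole list.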
+ query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
+ if 'clipid' in query_dict:
+ clip_id = query_dict['clipid'][0]
+ if self._downloader.params.get('noplaylist'):
+ self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
+ return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
+ else:
+ self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % list_id)
+
+
+class DaumPlaylistIE(DaumListIE):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View\.do|Top\.tv)\?.*?playlistid=(?P<id>[0-9]+)'
+ IE_NAME = 'daum.net:playlist'
+ _URL_TEMPLATE = 'http://tvpot.daum.net/mypot/View.do?playlistid=%s'
+
+ _TESTS = [{
+ 'note': 'Playlist url with clipid',
+ 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
+ 'info_dict': {
+ 'id': '6213966',
+ 'title': 'Woorissica Official',
+ },
+ 'playlist_mincount': 181
+ }, {
+ 'note': 'Playlist url with clipid - noplaylist',
+ 'url': 'http://tvpot.daum.net/mypot/View.do?playlistid=6213966&clipid=73806844',
+ 'info_dict': {
+ 'id': '73806844',
+ 'ext': 'mp4',
+ 'title': '151017 Airport',
+ 'upload_date': '20160117',
+ },
+ 'params': {
+ 'noplaylist': True,
+ 'skip_download': True,
+ }
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if DaumUserIE.suitable(url) else super(DaumPlaylistIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+
+ clip_result = self._check_clip(url, list_id)
+ if clip_result:
+ return clip_result
+
+ name, entries = self._get_entries(list_id, 'playlistid')
+
+ return self.playlist_result(entries, list_id, name)
+
+
+class DaumUserIE(DaumListIE):
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/mypot/(?:View|Top)\.(?:do|tv)\?.*?ownerid=(?P<id>[0-9a-zA-Z]+)'
+ IE_NAME = 'daum.net:user'
+
+ _TESTS = [{
+ 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0',
+ 'info_dict': {
+ 'id': 'o2scDLIVbHc0',
+ 'title': '마이 리틀 텔레비전',
+ },
+ 'playlist_mincount': 213
+ }, {
+ 'url': 'http://tvpot.daum.net/mypot/View.do?ownerid=o2scDLIVbHc0&clipid=73801156',
+ 'info_dict': {
+ 'id': '73801156',
+ 'ext': 'mp4',
+ 'title': '[미공개] 김구라, 오만석이 부릅니다 \'오케피\' - 마이 리틀 텔레비전 20160116',
+ 'upload_date': '20160117',
+ 'description': 'md5:5e91d2d6747f53575badd24bd62b9f36'
+ },
+ 'params': {
+ 'noplaylist': True,
+ 'skip_download': True,