]> Raphaël G. Git Repositories - youtubedl/blob - youtube_dl/extractor/cnn.py
b32cb898010a0ad0e02e12f3b3a55c3769cc3979
[youtubedl] / youtube_dl / extractor / cnn.py
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import (
7 int_or_none,
8 parse_duration,
9 url_basename,
10 )
11
12
13 class CNNIE(InfoExtractor):
14 _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
15 (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
16
17 _TESTS = [{
18 'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
19 'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
20 'md5': '3e6121ea48df7e2259fe73a0628605c4',
21 'info_dict': {
22 'title': 'Nadal wins 8th French Open title',
23 'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
24 'duration': 135,
25 'upload_date': '20130609',
26 },
27 },
28 {
29 "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
30 "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
31 "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
32 "info_dict": {
33 "title": "Student's epic speech stuns new freshmen",
34 "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
35 "upload_date": "20130821",
36 }
37 }]
38
39 def _real_extract(self, url):
40 mobj = re.match(self._VALID_URL, url)
41 path = mobj.group('path')
42 page_title = mobj.group('title')
43 info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path
44 info = self._download_xml(info_url, page_title)
45
46 formats = []
47 rex = re.compile(r'''(?x)
48 (?P<width>[0-9]+)x(?P<height>[0-9]+)
49 (?:_(?P<bitrate>[0-9]+)k)?
50 ''')
51 for f in info.findall('files/file'):
52 video_url = 'http://ht.cdn.turner.com/cnn/big%s' % (f.text.strip())
53 fdct = {
54 'format_id': f.attrib['bitrate'],
55 'url': video_url,
56 }
57
58 mf = rex.match(f.attrib['bitrate'])
59 if mf:
60 fdct['width'] = int(mf.group('width'))
61 fdct['height'] = int(mf.group('height'))
62 fdct['tbr'] = int_or_none(mf.group('bitrate'))
63 else:
64 mf = rex.search(f.text)
65 if mf:
66 fdct['width'] = int(mf.group('width'))
67 fdct['height'] = int(mf.group('height'))
68 fdct['tbr'] = int_or_none(mf.group('bitrate'))
69 else:
70 mi = re.match(r'ios_(audio|[0-9]+)$', f.attrib['bitrate'])
71 if mi:
72 if mi.group(1) == 'audio':
73 fdct['vcodec'] = 'none'
74 fdct['ext'] = 'm4a'
75 else:
76 fdct['tbr'] = int(mi.group(1))
77
78 formats.append(fdct)
79
80 self._sort_formats(formats)
81
82 thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
83 thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
84
85 metas_el = info.find('metas')
86 upload_date = (
87 metas_el.attrib.get('version') if metas_el is not None else None)
88
89 duration_el = info.find('length')
90 duration = parse_duration(duration_el.text)
91
92 return {
93 'id': info.attrib['id'],
94 'title': info.find('headline').text,
95 'formats': formats,
96 'thumbnail': thumbnails[-1][1],
97 'thumbnails': thumbs_dict,
98 'description': info.find('description').text,
99 'duration': duration,
100 'upload_date': upload_date,
101 }
102
103
class CNNBlogsIE(InfoExtractor):
    """Extractor for CNN blogs pages that embed a CNN player.

    The page is fetched only to read the player's ``data-url`` attribute;
    actual extraction is then delegated to :class:`CNNIE`.
    """
    _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+'
    _TEST = {
        'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/',
        'md5': '3e56f97b0b6ffb4b79f4ea0749551084',
        'info_dict': {
            'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn',
            'ext': 'mp4',
            'title': 'Criminalizing journalism?',
            'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.',
            'upload_date': '20140209',
        },
        'add_ie': ['CNN'],
    }

    def _real_extract(self, url):
        """Resolve the blog page to the embedded CNN video URL."""
        page = self._download_webpage(url, url_basename(url))
        embedded_url = self._html_search_regex(
            r'data-url="(.+?)"', page, 'cnn url')
        # Hand off to CNNIE rather than extracting here.
        result = {
            '_type': 'url',
            'ie_key': CNNIE.ie_key(),
        }
        result['url'] = embedded_url
        return result