From 9815bb0a551468e4939cacfffbc2d5cb8dd12431 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Rog=C3=A9rio=20Brito?= <rbrito@ime.usp.br>
Date: Sun, 23 Jun 2013 20:10:29 -0300
Subject: [PATCH] Imported Upstream version 2013.06.26

---
 Makefile                                |    8 +-
 README.md                               |    4 +-
 README.txt                              |    4 +-
 devscripts/release.sh                   |   12 +-
 setup.py                                |    5 +-
 test/test_all_urls.py                   |    2 +-
 test/test_download.py                   |   17 +-
 test/test_write_info_json.py            |    4 +-
 test/test_youtube_lists.py              |    2 +-
 test/test_youtube_subtitles.py          |    2 +-
 test/tests.json                         |   19 +
 youtube-dl                              |  Bin 75144 -> 112794 bytes
 youtube-dl.1                            |    4 +-
 youtube_dl/FileDownloader.py            |    7 +-
 youtube_dl/InfoExtractors.py            | 4617 +----------------------
 youtube_dl/PostProcessor.py             |    5 -
 youtube_dl/__init__.py                  |    9 +-
 youtube_dl/extractor/__init__.py        |  133 +
 youtube_dl/extractor/ard.py             |   45 +
 youtube_dl/extractor/arte.py            |  136 +
 youtube_dl/extractor/bandcamp.py        |   54 +
 youtube_dl/extractor/bliptv.py          |  177 +
 youtube_dl/extractor/breakcom.py        |   25 +
 youtube_dl/extractor/collegehumor.py    |   74 +
 youtube_dl/extractor/comedycentral.py   |  179 +
 youtube_dl/extractor/common.py          |  264 ++
 youtube_dl/extractor/dailymotion.py     |   77 +
 youtube_dl/extractor/depositfiles.py    |   60 +
 youtube_dl/extractor/eighttracks.py     |   51 +
 youtube_dl/extractor/escapist.py        |   68 +
 youtube_dl/extractor/facebook.py        |  111 +
 youtube_dl/extractor/flickr.py          |   57 +
 youtube_dl/extractor/funnyordie.py      |   31 +
 youtube_dl/extractor/gametrailers.py    |   59 +
 youtube_dl/extractor/generic.py         |  151 +
 youtube_dl/extractor/googleplus.py      |   82 +
 youtube_dl/extractor/googlesearch.py    |   39 +
 youtube_dl/extractor/howcast.py         |   37 +
 youtube_dl/extractor/hypem.py           |   63 +
 youtube_dl/extractor/ina.py             |   31 +
 youtube_dl/extractor/infoq.py           |   50 +
 youtube_dl/extractor/justintv.py        |  144 +
 youtube_dl/extractor/keek.py            |   32 +
 youtube_dl/extractor/liveleak.py        |   44 +
 youtube_dl/extractor/metacafe.py        |  110 +
 youtube_dl/extractor/mixcloud.py        |  115 +
 youtube_dl/extractor/mtv.py             |   72 +
 youtube_dl/extractor/myspass.py         |   64 +
 youtube_dl/extractor/myvideo.py         |  164 +
 youtube_dl/extractor/nba.py             |   40 +
 youtube_dl/extractor/photobucket.py     |   66 +
 youtube_dl/extractor/pornotube.py       |   41 +
 youtube_dl/extractor/rbmaradio.py       |   44 +
 youtube_dl/extractor/redtube.py         |   29 +
 youtube_dl/extractor/soundcloud.py      |  129 +
 youtube_dl/extractor/spiegel.py         |   37 +
 youtube_dl/extractor/stanfordoc.py      |  112 +
 youtube_dl/extractor/statigram.py       |   33 +
 youtube_dl/extractor/steam.py           |   63 +
 youtube_dl/extractor/teamcoco.py        |   46 +
 youtube_dl/extractor/ted.py             |   79 +
 youtube_dl/extractor/tumblr.py          |   41 +
 youtube_dl/extractor/ustream.py         |   36 +
 youtube_dl/extractor/vbox7.py           |   46 +
 youtube_dl/extractor/vimeo.py           |  138 +
 youtube_dl/extractor/vine.py            |   37 +
 youtube_dl/extractor/worldstarhiphop.py |   44 +
 youtube_dl/extractor/xhamster.py        |   61 +
 youtube_dl/extractor/xnxx.py            |   45 +
 youtube_dl/extractor/xvideos.py         |   43 +
 youtube_dl/extractor/yahoo.py           |  113 +
 youtube_dl/extractor/youjizz.py         |   45 +
 youtube_dl/extractor/youku.py           |  104 +
 youtube_dl/extractor/youporn.py         |  117 +
 youtube_dl/extractor/youtube.py         |  795 ++++
 youtube_dl/extractor/zdf.py             |   65 +
 youtube_dl/version.py                   |    2 +-
 77 files changed, 5314 insertions(+), 4657 deletions(-)
 create mode 100644 youtube_dl/extractor/__init__.py
 create mode 100644 youtube_dl/extractor/ard.py
 create mode 100644 youtube_dl/extractor/arte.py
 create mode 100644 youtube_dl/extractor/bandcamp.py
 create mode 100644 youtube_dl/extractor/bliptv.py
 create mode 100644 youtube_dl/extractor/breakcom.py
 create mode 100644 youtube_dl/extractor/collegehumor.py
 create mode 100644 youtube_dl/extractor/comedycentral.py
 create mode 100644 youtube_dl/extractor/common.py
 create mode 100644 youtube_dl/extractor/dailymotion.py
 create mode 100644 youtube_dl/extractor/depositfiles.py
 create mode 100644 youtube_dl/extractor/eighttracks.py
 create mode 100644 youtube_dl/extractor/escapist.py
 create mode 100644 youtube_dl/extractor/facebook.py
 create mode 100644 youtube_dl/extractor/flickr.py
 create mode 100644 youtube_dl/extractor/funnyordie.py
 create mode 100644 youtube_dl/extractor/gametrailers.py
 create mode 100644 youtube_dl/extractor/generic.py
 create mode 100644 youtube_dl/extractor/googleplus.py
 create mode 100644 youtube_dl/extractor/googlesearch.py
 create mode 100644 youtube_dl/extractor/howcast.py
 create mode 100644 youtube_dl/extractor/hypem.py
 create mode 100644 youtube_dl/extractor/ina.py
 create mode 100644 youtube_dl/extractor/infoq.py
 create mode 100644 youtube_dl/extractor/justintv.py
 create mode 100644 youtube_dl/extractor/keek.py
 create mode 100644 youtube_dl/extractor/liveleak.py
 create mode 100644 youtube_dl/extractor/metacafe.py
 create mode 100644 youtube_dl/extractor/mixcloud.py
 create mode 100644 youtube_dl/extractor/mtv.py
 create mode 100644 youtube_dl/extractor/myspass.py
 create mode 100644 youtube_dl/extractor/myvideo.py
 create mode 100644 youtube_dl/extractor/nba.py
 create mode 100644 youtube_dl/extractor/photobucket.py
 create mode 100644 youtube_dl/extractor/pornotube.py
 create mode 100644 youtube_dl/extractor/rbmaradio.py
 create mode 100644 youtube_dl/extractor/redtube.py
 create mode 100644 youtube_dl/extractor/soundcloud.py
 create mode 100644 youtube_dl/extractor/spiegel.py
 create mode 100644 youtube_dl/extractor/stanfordoc.py
 create mode 100644 youtube_dl/extractor/statigram.py
 create mode 100644 youtube_dl/extractor/steam.py
 create mode 100644 youtube_dl/extractor/teamcoco.py
 create mode 100644 youtube_dl/extractor/ted.py
 create mode 100644 youtube_dl/extractor/tumblr.py
 create mode 100644 youtube_dl/extractor/ustream.py
 create mode 100644 youtube_dl/extractor/vbox7.py
 create mode 100644 youtube_dl/extractor/vimeo.py
 create mode 100644 youtube_dl/extractor/vine.py
 create mode 100644 youtube_dl/extractor/worldstarhiphop.py
 create mode 100644 youtube_dl/extractor/xhamster.py
 create mode 100644 youtube_dl/extractor/xnxx.py
 create mode 100644 youtube_dl/extractor/xvideos.py
 create mode 100644 youtube_dl/extractor/yahoo.py
 create mode 100644 youtube_dl/extractor/youjizz.py
 create mode 100644 youtube_dl/extractor/youku.py
 create mode 100644 youtube_dl/extractor/youporn.py
 create mode 100644 youtube_dl/extractor/youtube.py
 create mode 100644 youtube_dl/extractor/zdf.py

diff --git a/Makefile b/Makefile
index e00f5e6..85dacfa 100644
--- a/Makefile
+++ b/Makefile
@@ -40,15 +40,15 @@ tar: youtube-dl.tar.gz
 
 pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
 
-youtube-dl: youtube_dl/*.py
-	zip --quiet youtube-dl youtube_dl/*.py
+youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
+	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
 	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
 	echo '#!$(PYTHON)' > youtube-dl
 	cat youtube-dl.zip >> youtube-dl
 	rm youtube-dl.zip
 	chmod a+x youtube-dl
 
-README.md: youtube_dl/*.py
+README.md: youtube_dl/*.py youtube_dl/*/*.py
 	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
 
 README.txt: README.md
@@ -57,7 +57,7 @@ README.txt: README.md
 youtube-dl.1: README.md
 	pandoc -s -f markdown -t man README.md -o youtube-dl.1
 
-youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
+youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
 	python devscripts/bash-completion.py
 
 bash-completion: youtube-dl.bash-completion
diff --git a/README.md b/README.md
index 2f3c81a..929f2b5 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 % YOUTUBE-DL(1)
 
 # NAME
-youtube-dl
+youtube-dl - download videos from youtube.com or other video platforms
 
 # SYNOPSIS
 **youtube-dl** [OPTIONS] URL [URL...]
@@ -121,7 +121,7 @@ which means you can modify it, redistribute it or use it however you like.
                                video (currently youtube only)
     --list-subs                lists all available subtitles for the video
                                (currently youtube only)
-    --sub-format LANG          subtitle format [srt/sbv] (default=srt)
+    --sub-format FORMAT        subtitle format [srt/sbv] (default=srt)
                                (currently youtube only)
     --sub-lang LANG            language of the subtitles to download (optional)
                                use IETF language tags like 'en'
diff --git a/README.txt b/README.txt
index c5e2ce3..b58f682 100644
--- a/README.txt
+++ b/README.txt
@@ -1,7 +1,7 @@
 NAME
 ====
 
-youtube-dl
+youtube-dl - download videos from youtube.com or other video platforms
 
 SYNOPSIS
 ========
@@ -136,7 +136,7 @@ Video Format Options:
                                video (currently youtube only)
     --list-subs                lists all available subtitles for the video
                                (currently youtube only)
-    --sub-format LANG          subtitle format [srt/sbv] (default=srt)
+    --sub-format FORMAT        subtitle format [srt/sbv] (default=srt)
                                (currently youtube only)
     --sub-lang LANG            language of the subtitles to download (optional)
                                use IETF language tags like 'en'
diff --git a/devscripts/release.sh b/devscripts/release.sh
index b8efdab..d32ae47 100755
--- a/devscripts/release.sh
+++ b/devscripts/release.sh
@@ -14,6 +14,12 @@
 
 set -e
 
+skip_tests=false
+if [ "$1" = '--skip-test' ]; then
+    skip_tests=true
+    shift
+fi
+
 if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
@@ -22,7 +28,11 @@ if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit
 
 /bin/echo -e "\n### First of all, testing..."
 make cleanall
-nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
+if $skip_tests ; then
+    echo 'SKIPPING TESTS'
+else
+    nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
+fi
 
 /bin/echo -e "\n### Changing version in version.py..."
 sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
diff --git a/setup.py b/setup.py
index eb7b1a2..61435fc 100644
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,7 @@
 # -*- coding: utf-8 -*-
 
 from __future__ import print_function
+
 import pkg_resources
 import sys
 
@@ -23,7 +24,7 @@ py2exe_options = {
     "compressed": 1,
     "optimize": 2,
     "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe']
+    "dll_excludes": ['w9xpopen.exe'],
 }
 py2exe_console = [{
     "script": "./youtube_dl/__main__.py",
@@ -57,7 +58,7 @@ setup(
     author = 'Ricardo Garcia',
     maintainer = 'Philipp Hagemeister',
     maintainer_email = 'phihag@phihag.de',
-    packages = ['youtube_dl'],
+    packages = ['youtube_dl', 'youtube_dl.extractor'],
 
     # Provokes warning on most systems (why?!)
     #test_suite = 'nose.collector',
diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index dd67286..d3ee296 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -7,7 +7,7 @@ import unittest
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE, JustinTVIE
+from youtube_dl.extractor import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE, JustinTVIE
 
 class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
diff --git a/test/test_download.py b/test/test_download.py
index 577bcdb..8d86984 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -14,7 +14,7 @@ import binascii
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import youtube_dl.FileDownloader
-import youtube_dl.InfoExtractors
+import youtube_dl.extractor
 from youtube_dl.utils import *
 
 DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
@@ -72,7 +72,7 @@ class TestDownload(unittest.TestCase):
 def generator(test_case):
 
     def test_template(self):
-        ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])
+        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
         if not ie._WORKING:
             print('Skipping: IE marked as not _WORKING')
             return
@@ -87,7 +87,7 @@ def generator(test_case):
         params.update(test_case.get('params', {}))
 
         fd = FileDownloader(params)
-        for ie in youtube_dl.InfoExtractors.gen_extractors():
+        for ie in youtube_dl.extractor.gen_extractors():
             fd.add_info_extractor(ie)
         finished_hook_called = set()
         def _hook(status):
@@ -125,11 +125,14 @@ def generator(test_case):
                     self.assertEqual(md5_for_file, tc['md5'])
                 with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
                     info_dict = json.load(infof)
-                for (info_field, value) in tc.get('info_dict', {}).items():
-                    if isinstance(value, compat_str) and value.startswith('md5:'):
-                        self.assertEqual(value, 'md5:' + md5(info_dict.get(info_field)))
+                for (info_field, expected) in tc.get('info_dict', {}).items():
+                    if isinstance(expected, compat_str) and expected.startswith('md5:'):
+                        self.assertEqual(expected, 'md5:' + md5(info_dict.get(info_field)))
                     else:
-                        self.assertEqual(value, info_dict.get(info_field))
+                        got = info_dict.get(info_field)
+                        self.assertEqual(
+                            expected, got,
+                            u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
                 # If checkable fields are missing from the test case, print the info_dict
                 test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py
index 8134dda..b5eca76 100644
--- a/test/test_write_info_json.py
+++ b/test/test_write_info_json.py
@@ -10,7 +10,7 @@ import unittest
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import youtube_dl.FileDownloader
-import youtube_dl.InfoExtractors
+import youtube_dl.extractor
 from youtube_dl.utils import *
 
 PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
@@ -48,7 +48,7 @@ class TestInfoJSON(unittest.TestCase):
         self.tearDown()
 
     def test_info_json(self):
-        ie = youtube_dl.InfoExtractors.YoutubeIE()
+        ie = youtube_dl.extractor.YoutubeIE()
         fd = FileDownloader(params)
         fd.add_info_extractor(ie)
         fd.download([TEST_ID])
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index e8b49ff..2c46edf 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -8,7 +8,7 @@ import json
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE
+from youtube_dl.extractor import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE
 from youtube_dl.utils import *
 from youtube_dl.FileDownloader import FileDownloader
 
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index c80c90c..129e239 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -10,7 +10,7 @@ import hashlib
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.InfoExtractors import YoutubeIE
+from youtube_dl.extractor import YoutubeIE
 from youtube_dl.utils import *
 from youtube_dl import FileDownloader
 
diff --git a/test/tests.json b/test/tests.json
index 3e0db29..01367b0 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -630,5 +630,24 @@
         "title": "E3 2013: Debut Trailer"
     },
     "skip": "Requires rtmpdump"
+  },
+  {
+    "name": "Statigram",
+    "url": "http://statigr.am/p/484091715184808010_284179915",
+    "file": "484091715184808010_284179915.mp4",
+    "md5": "deda4ff333abe2e118740321e992605b",
+    "info_dict": {
+        "uploader_id": "videoseconds",
+        "title": "Instagram photo by @videoseconds (Videos)"
+    }
+  },
+  {
+    "name": "Break",
+    "url": "http://www.break.com/video/when-girls-act-like-guys-2468056",
+    "file": "2468056.mp4",
+    "md5": "a3513fb1547fba4fb6cfac1bffc6c46b",
+    "info_dict": {
+        "title": "When Girls Act Like D-Bags"
+    }
   }
 ]
diff --git a/youtube-dl b/youtube-dl
index ea6d889ba2d69a7a9c989380e97ccd92186b8fb2..ffe9a4a46910617eb12fc85c4df4d238a11a397f 100755
GIT binary patch
delta 101234
[base85-encoded binary delta payload omitted]
zGEw5OWlp2h4VA31FA3i0IBWO+AnYCd0}GQi;n=o29ox3kvD2|_r^6H5wmY_sjyty9
zLB}>WbMOA{w==u*&G{Qny;b$p1Cq+IV5vUwNSL6b14!o?>-#@sThR109Gb0Bd_!tN
z(W|Nus*lt%s>SK5ky+)p4_bc|P0ezst-^tV);cwj?0<EX-A<rDc1HjK8KC%|OU3_v
zsQ6svZSC#;!-&^~XYIV+eAVF#NfR5w_Y$LKq(Grmz*bH^obi|+{|6{4tK8ZCmW70>
zPcAf;TJ+KNIpIP4NV-mI<}`jm(m3_P?tmYwFVmh#(Fvc*Is0}~p?<DSyUwX*-XikQ
z!(nA^zOYCxlOR7P!rXy+!Srb2y5f*s-DEdg8CNjt40KsXiZH0mG`sRP%|rmMBEjZK
zP>KzTFnXZp^*A6VUjZjBH&TIO&pDU|$7`h)zByMfBbW&{xB39uLHP=`4f~<HYs&b)
zE1=mc8ykZg&408O7yJTdMh6_WMPZ=xaqJ=ItPO>N11@x?R-#{tX@*uhZoWH1%V`va
z1~Z<>&+Q<yV6#X1llZ8xY6bTjL?(mKw97B=Yn9Uz)c=lPpaj~%{5%XDQFlO_Qrc0R
zhj_c7NIHMRDj|YYiHwtom8XI2h#^%Fi!re39Gr!0RGBsp`EY5X#y|~D`U@A_gV{Lg
zY^jM)IM3jX@8DUl@}R4$p@XqEDLZM}XxPP$91md+OlyM5-%8R;$CX!BWC|09iZCfM
zK88DBiJ5t7k^xMLZqmGnc#W<~^X}gq$;IO<z?~HjpvOPF6)Jo6lT$#JDIg$y3H-X^
zlI@Ye#X`7n3hNL4(&V#jhOPGoY(c;v(naW9DQ`EIV!-T$b7J|eH`smq^Gn*;h2Nn2
zG}+qifDW5>6BBO(sjRkD6uqB8o6ass52rO9)nX%gECP_Be2J{48gbsCbC+;tieq|=
zW}5Y%Gpiz7JUUvFri0Vu*6Xyb*c{@DGtPpECpk_&Bt{w+OghlP|A#jUlaA?C?4`%s
z^WzV!1gWZ-t&!73Ma#Zj<QbadsE66!Lz^xo7I16_{60hHV(LdIi@R#)tA2Pl3G1SM
z;j1c!OrVn%f9&UR${*HagW7y;*1q@!%+^r#F8k`Q$6iYv3AJkh3OaL2%f3=;7ER=@
z&0o)qYGb(*jfZnZHjS=lbhZ21_CwN>a+Z^FdZ*g`y_s9f9Oav;J4nn45Maf&)L1IM
zqI~@u2;Py`b(F%;l<-XaFuzT9Z$XQ-U3tVlVhBXQG9@R^CtzQjqfR}mnm5}}gxo0$
zQ4BnmI+-y$r1%22$G2SU9@y2%LsS=)RjCBZXyo$~y-g&#SA1Wt^}l{C1j+4(G#p$m
zh%Fl~MSu|;a*n;q%Y_Tn8BE4zm&t=^8HP5jQz@@QsJ{(uIo!(beoApf%+PUbU&3;z
z2m&PZ^g$j12&`8YTO8D~b6QmqVqz@u{LZ<ULUH;CwF+rmY4NpY4ZfdfJ9r_n9N@cI
z)Mozl{`MMril{M3$BTu$V!9SiNV^*G)1-?nI+;q{nIlVJpy9r~SuA%6vsF4&OMba+
z1Y*5y#5DJ!a}So~?<C2&Nx+IPa?&4WB4j|{JY>#(gftrduChYj>JkmrJUGJ`B4`~6
zLD{5`CHe`o%x@GFd?lFg?Uu}7{aJ<<?J~|LIw_&g98sw=t3nH3cWdfQcVkl|>1#yX
zlzni>xWQf^m*Y@|Q7~8SOCV+!jF?Xdg#3#fO9}+(HBF#c)FA9;DTSuUN>PKR5IUe5
zl;Cl|3p1<KgL>d+iBs=$<juenePOFPoe~^ziaqa`Lr;!{;e*!;qrTS7IhwstN*S~V
z&5heiz<tQ2D3Ze%#PKs9JM_ULHJ<?D^s*m*rY@jG&O64{0@?6{N9EqIXw@6uQ`yG}
znX50uN%oR6N|cO#JeTqTk-%E4f)n7snoGQ$m^n$Ti(AxKc1x=wldF^dHdhfiSh56q
z;{sjmZnPqF#(J5%XuEZjmH;9h!|mCcX95j6-TYRgs1^6SHM8tl4ZKW|$+^GbEDkjd
z17%FCvk^?Yb@P-zH>R*pSbWE@U#MuD+h2K!T1Qz#)e-wlIJmxqq<MH(aR^xP(`YFa
zavL=*7jzjEBPgFXzA>i2E7hc2orEA+Y79o7u)6;IPJ@tCo|(JsMWFLz6fI)n<P<~M
zXBvIO@!=gt+K+U_S2k)YDw_)xy&3v43`z0JXN|JGd|0X{%$#i^&=iFuU}<u6eLT;8
z#?aH244J-<$&aRo{KjG70TLX7sjQ||-JGTKLaym9)hm^SRTv;c{BM}t`HcffHGZ+-
z3%!9~o6s|*mqE0rae3N-V5NefOlV>tn#*ua>o?fC3}Op@p?`1dc_fj;T2grDd4AL5
ze@A;a`3&iD6NpNrY_K4j&V54F46!4jq4KIT<ux&Nnp@OG2X-Yl0Ingv$I`iq!Nb4@
zt_B89#a!<6tNKWAsG)QL%O7Jh6_}?i%2vqI@iG?i%3@_g#1RjgBjf6)TD*9>IejP|
z?EyK6q)sl>YjONecQhWa7osl?_uNpLiYiz*qKrlgNB1yD1>=6J2m=ZG=2PE>Nsw;2
zWt@a7P`{@q&8Qmg0DoP!1D~7gw87dyIz-Pa;oo7(+P2ffnyrGCck|+O1u?ci0#BgY
z&CYo=;T&R6?L{)>lEU(hhSCooB<>cv-OG^5*>{tVDNG&_(q>wtusHLasbKR^9>a7*
z;@v3kGQ__tPdE7x@~gr?GPpLCRO{kV2x))CVc^qVj&v=R0_Yl8B>m%$TLbbn%h!om
zcVSE-d#=c>Yv9rBh5T(iAzE5HhV`GvfCy3eRehtLiM2Y;-_D#wOdLr##?8~hy~E#U
zBbYUf(O9SVBI@%;nxS#z(}+y|wt<S-#i7f*&sg)K^*;YdE98H+`EYIkq$YZp(AO&M
zwBs%B{jqfg0sjc9CCO8D1jr&(G^@ty>3p|7Y2*0yHz|~Eg#LJ1nLl`Qg7lJmVfcPt
z3{2x-L<XQbjx@{A3`|?QT$$|bL5VEr{4D%cNTQIOvlBikUv#_qeZ4@~_<DU6C!hPU
zbl_K`e|V*c#tYBtSI$E_>^F(C@rM|h-S`D{AcK(*0psXjjX&NABl<xJ-JJ99i63?q
z%>-=5*3D*ogUuX~kC*)A@p+O~`!*Td_hJ3cT7>!9u5l3=)wdc<<n8wR<O`TsPrd4<
z-NM3Bh^Fry{HgoaY;5&hE9>#2uCQpGv;~Z4PD1C^&uCkdBScRRcf9)S=XZQd@84h2
zEtvTOfcJ11Rt!yB3EhL8A*Evsa@dd$3^uTX>XwIgNh1Z#x-3S~5Kt}}V#n~~_98(S
zxD7-M{4_3g9V}hi5W_so@t-M+_uyG?4leIdp=Kizv6i=+kR|!t1+wsH@#;d3(IbDX
zSZgVW36vs7i$Z!AmD}26*DC~vmbgAl?T%6H0FH9xG0&&OvY*j8d-T=`O+@AH%rb1t
z-D`zw!8;uBUNc@SyJg_bLD!FNl%+BGfy#L2ZyfP(-|WV5(idp*L;ZS$cB@=4>V+R@
zT|F)yas|3pKd%2aI4PuGkn6d%e)z?m^Z7$gfH~p%2^^OoKeVo>GU^J`qi&@|MYfAI
z0>vwe#6;6=SJ0t_#akXl(9A2f+O3yNPunow?R|vL#@dg|JY$Ot5483b1`VbVNVSe5
zE8-gv$Bded`&fizTK@i)OljQds&L?SMqAQv_1YBZd>+y`P&G-@m!r=gaSM7o=(0lQ
zlX=M`r_n}?jQTxBcqnm{9qtmr--G9sfzrQX)5_?Ujrd`gOgGM{?h<vBb=@j+g*opY
zf_8OtB+YxFHk(twlD)Qjh>`lu-?x`8R~0^gm@}Ig<p}>@6z(mcE~K}|yFGi`xaFW8
z5@$53xNxGQd<4cnf6qoH7Tz2(&Rxj+MgjZcS0@YayOAd=eT-rD#teT{GBsWf0Y={3
z+sTCZ*(J%3Z&L~kn_calIQ)&W^0#vdTM-xxh@}s8gGiJw^16z#!vOEg?^_Z556?T;
zOG#M)314&QR;h7cpF6#-o>pHLQTr0+kKV0ZZ^(4-ZX8Gcjtk7aZYd__6I9%DFwub@
z7aW+*c3GAs&>jzeD&``Xo&_2VfetS#Y&EkhWK%Fp0&_l}gJ{x*BlbrqNf<F}rQUK;
zC{86dTr3GIf;QD=CpG4s^acfq(T#pcqRY^kbo#09U*J~Z!~)exIN17c!~0F|7rw4-
zb#GnF^fW$Sl$5D`XAp-k%Le0Vl+@wQj5SkWTQJkNhp(R)w%=m)szy+C04M<yuLMok
zx*sa;c1VyOW(t!br~%ko=y1WOJZlUF<pQi;dBC(=vLFxK*UoK`)+_X{ivy~v(lZrZ
zdUwC7OO=b~`g}y(yj4!-(5hF^7{0{g8=vn2c22P|w;V_t1q6AQf|#Xtn#+fo8!M9l
z$Uon=umB9CP0UZVpTK7n1?j(ov`q{xZ9Hu4T`d3Ow$r5k-?Yw$8e&z9Bs59+S+F$(
zUWqy=4ZlXh05`^FQl>?vad9-vgos4frwd*(z_qCh0hgmIE!Fv@+d1Du9i}WisE;_F
zgmg#RBz*+d{Se!OWb#Xs2`#+|IciEqNqNd8G=$(hO<Ig}dhshof`fj*Y(M{Z$V9jh
zaIZNWfmG#hl)49b>Xm-_-M;MvNCABY$t*udB!}ioWGxu3Bb|G&)tH(iat_mt)0s)<
zfmMnhi)xbGhy2fH{riK%M{5h@>1a}KxX7<7KdM~w&Gu%f(st|`R_J~iV6_}aI!wmA
zYxoKhc6$-=U*Zgm<m$(!gn~AyGfoyxeG$km98GYWFTKGP6^SEEk=Wk7!7Lh$Bj(0L
zxale@q~xQZiwzg4R>^S)3qRn2G(z`}2RLccTc0+1{ZOuI78h-nNnD7aqFWL9L{3yH
za5truu->ImXl@IA@Ma8rJ1gB;JJuE@AmV8I?~<k~e<(l`f_%>-4~2H9Hv#8_?0_tQ
zEb~h&O7cWZxa6>m_Ei(K_+hUb{QZ2XC9gS21Z#K(PXl4LL=fI_*dGa2P9tPA3_#1V
zic^UvYh0)-%tVvarsMFS(I8O6_SKz2wC^)es3({To)Ocbu+KJ8E5>6#kqWTPZ^w!Q
z?QyNm)P$5ZvLe?l4g9o{jn1wc3jSp+$OnUBw*PbCAe^-q^NG&M!g`!#s5PL9;a29E
z-kj5t@jLE77OpqpZ2RUz;}4C42jJ)Q_QP>Sf2QRa?~h-viQKxwl*58V_|R$PuCM);
z4c#?ZG}Ty^T@I@G3hk%;BIC(B6hqy9Ka=?-9?1KjeLpn}J*0-LMdC&3XvCbIR5@D-
zEfEPV5?_~RTKguut>wR}eWchgvBg}Z!u1vYsSW*u0-erz|9a_w_2ZkeXfFWhqI!|q
zuw0iXFzLqCV?Mgn;iM15Sk!cN`#wFI%Bp@Aj#(4duY*3}gFHPGiv)kOqPPN(vfSdF
zR>HO7Tc%!u!_*wl_tT|te%?%dud%>h(!z$1n&iq7d3^54^}Muuy&eX+M&&i~4kAt!
z{Q0e|_R)hMecYb#vG3~wo`5Ek669F8k9mBa9Fmi7*JVm5D>82S_C-+q(;Y6uq_;bY
ztONr0s-+)x59gm*S!3C3OP{P>??FvzX~=8W5T=Spw#>W_6Hr)WGql;VVd*Zc*?wC}
z>2`13IJMc6zq7J8{Azag1!<Pc2fq5aL$CQ}%j%k@Trsk^tLRh75`gQ)*RZ|9+)H4{
z*`cw+`y*<5`alrVis3fLXd^UF#pVKxRc-a{@8WxrXb6O^bk_Ek7ZO$M-`2yZ;Wsfw
z)|TNe2a|Ss<wU_vn!_$9tT5)bKs%ZBrrFjz8|3DoTHTi&_769$zQg@(#(#c#AQGKz
z{lEbbkhM=n=)b+Ko0vM-J6pP#S=yL7{|BTcSWU@(j|0K?^X*Y4AfxAo^uZ4}2>?4Y
zgp9B~y1Spn3NdC4=NP^w^RRVn{Sc6@&n|U#1U$ak#D5u$eI6+lt%wE>Opj1lMN&{t
z1q#RNt7f3%ehLt{fOnl0uN<FXn@7xA&UuYYmaQTY@qs_1l+){UyH`()X&LTel^ian
z|LJ0Nt>WOuE2KoUHKdcrMR=df|GC}Aq3%Wq9!@v*yS)~xx^_XmZK_<t%XtDF&j_=(
z0azLTk}0}_fjZQO-~y#KV9&qV5|m~9`*mt;EwkeFWP@JE&(OQ;I_vw`PmQ^oy_;91
zh&$@%nf_Fh#DwlCD_tyhxH_aE@>J(Oqd#yhLaz}#7cx6oCud+`9{P13KQyXoq~#B1
zo12`n=+l2Z{&;8O$?hYx<5By*35yYL?FKZ}dxfn?3fgypEVBL>^9Y|{Q6DuBAe&4e
zo1}Wh4n)jxoWtUMR<lDsZ4eHU7UXPVpn=Mq<hF9Mp4O_2u^FNL(&@CIS{~Kn2)gBW
z1{XRtNXtIx)?{1@<~F(g5_Uc)G?_>e?|zj!cV#4Pkf2vf?Hc^rB@%?wz<uM{2?cyy
zEs-KVQ*(*DFkE))k_aQvd)63uF3@vy0@xT`rH{-|-AT){BXY9wWa2~AX=&n5CvR_G
zmOy2*u-*p7Krj?F@=B62sgnjdZw7hqo2WTl7UWefD9?Q%zt9CDQ>si+v)o;ZvFX<`
z?F%5JuaW-f8^#ecaN#|g(D&_DTm`l#5nujDVwU0=IWX^anFR_wKyptm84Y2Hk`L#U
z(O;s-Yd*?xQu(ojqPMZOTR*ko$XOy!Zf|yaR%<t%AOxJhzSGf-s@RKO)!o2alsg><
z+PU>y${ZfCl=?o+X?bhi4-Xe$6S>{JyzQUamS?IUMZ59G7L-whMcpUKW&zmJWg^fw
z;D|;^1&mE=4)+#^pQGHfbO796C3@_SSWfzcQvYxhONdH(`T0lEox3r|b$UI{?T<95
zt0$lIpD!%uZ9!_sLc(=*j#n16Qk^fY+M~UpD7!nae|Nm^Zeub6!?K+yU)YKJQ3nxE
zNMg(L!nbxJmxd@rzRl=iGyr0Vgzy}0BB4XotcZNxGI+zPgX$&RYUH#Ck<qSpFIVOi
zSk!(;2x0!4u_#p+9C!g99&%bY5gg62ygR66m9x#ASNV37YSByAzP4Mr^&kH-U=RgQ
zg(7?sXaYXvxc{&D0{AaG0aHtJ3zvU*3t0c>BA>CFW5V#6`mPZ*If*qZw+o6Xt!4?S
zMq?}zn94#@<EU&OPPl?ZEAq;_<d{Gq2kpCz_OkAL!T-Ru@9{NRc%8|#XuNTFV<<4C
zz(ZXX7+|sAN;@Q%KA>73;Y$Tuc#(-88JwhQXI5Sat7YSxLk#!<qaLYAMXt8}lyhjy
zQqHb*>0)iSbE(9LT{L{CP`<oR(w+lo<x&5#mxUZy@V&L|*3uK#@2TAWwo}C%oFg~1
zhi5`dn7A_0q0D;c`)2Ize*XP}ui2jRQravLNW2`+?HbS<#A$#KkC>q|s)j1iOo1p>
z)zppDZR`9DQ`1aSX;@KDbI^|pY_Z5`1*x8C!mv>=PA$-B0C)Trp1Mmp@gvI5Hb4Te
zP${=z<ftaf8XeS3YbLZT!Bm(jw(g>-mC{XrItg@W0cVUdUJ6h04it1TZ)`EVNiU2A
zcyEnej4GeNx|LdqDaGcziH&4B<mIB=Ss4djbUR@(l8C`VirvEcg)ol>e)fV&vt%4o
zQ@P(gxpixu@kW&83z;N_G>gcszGi2z&EbXFh)bXzPEWt@{fdhO(bZ!c+foq?m#54$
zw#96>o}wuc!`FnH1IzMSH-pY_fJE~L3>HHdt)b=T#*ePP#a3mozJ!*QO)BW#|NWNB
zLs;BgQ<Y6s)&YAA_kaoSpr2hfu}VZ!YzS;n%hC)8AHKOFRjh_@J>b#*Fi8}=2%)f_
zLKBWHqEW!(pubWhoD`4n<2Z}KljU{mCU;YQeVs*aH+m6OFIJU%^}mi99Qig6sB&!&
zzvngO2WEu4XnYrswgX!t4|&?@@O;j$TQfxme|*R@vo4kjooTaP#)ncu_B-eHmnT&W
zn^}hg*cKfU>jIAgs4E~<9+9&9)kJGhLP8nDrn3n5Rc@(yhMsJMG&T6sBg*Xvmwi>L
zbkb$pxbPtLz}P1BRn`UH=03mx7Q)#A0}=}LgZIkqwH5hNToiWHSG%-QqIr|JC!%@p
z(=)g8xgNXf;J1f^9G=ngK;srJoaMd?9bFX~L+$(bf6-`p?e7ILeAe6GXH)&RL_||(
zV?zf^XP3{03jCje7yrsu%U0Wp&E!JyJ*=s<pv|P0Y*fxwbe@swp!`|hyl^)xlOc{I
zi}rn}xOwGt^Y8mZjK7dj*=kZK3g4FZ&BPBrEYqLSr219r{uUqjFacFF<Wf$$DO&M%
zK4s0%_cORTytX`IfHLpbGt@q?0Ax?Kb<@T4$J-7L<p?=-N8_(ZcrsIN)=?5J7!bk?
zZB2g`4?58n>FrmjY?3%)Rt|eM8SLv7K03*rjgI)&c)zcR^JI#{yKxb8lg)tfL?m21
z_K5Uh=T4pqW8~3yZ!;N3KGdA)eZgSdnOb<Wd1J9!1le$z0qE!*Y=e6u{~UAMieuN3
zr-L%ETBtF@JK>{`*QgxkD%*E^+QucV@4rALBGhaDh{al46xMJm!9-wtXV%e3n~zv<
zDI;pdfD~>{>a0!b#BqKeKWAc9-DykAo^c&KVm9O<&di{TgsrUr+l7+O4Fc_18vYZk
zUU~Bc2D!HE5tx*;n`2-qn(rLp0T<0@{<|?GV&|~uIn_mLYUZEm9m~GeM_O1T<B;`;
zabW1LK}u6YG9aLWl$052<NBq#rXX&pkjJgB?)G*dcF1ImF!iRVt<ix*BB?_`;$26|
z)CF>ge?qiN2GR{*hivfd#Hp|~P7p&`>K)?1iC&JK5tyBoxVCj^KauBx?P(`Pd~6A%
zXioUWEUhe3xbS7^L>Q9AZqPz<h(zlpi~ReHGo7hE+h-87t4B5|A8SE^mQO_!_~Kqc
zSsLe;&5gI#VXvO<xnc6h6_vTn80Au3>^hFlvlaMst3l4zQ2E5dao8OxyQvY$kq7BN
zI=@TOU;s&akNz)@p$7v4VwRHL2Zxc&-jBjxJ&@>%3}r32tf+6;avV31CNd+buE6N=
zOGD%WhY4spEd-o3MZK)Wg0#VgblBMNn?#~3gqlUb+=x6poI;t4Vkk<KcAI7o+Y5%t
z+ldtFn_FTQ7w$a8ec*H0ntD4BzkWm)sVGg~dIM8z@0L>n?CRYP1}KgM@8+nQeT2j#
z`t0aJ&fYWgos=9|j~4l+cM<Yk&$uaW#?D||*)4yW4k%0S$IWbG`a#cyKahi2x2+0C
zeq%cjdPD#7kXsiKA6N7#afd|tH{}TY!%fG`(Ad<--ro8@47l0f|7W?@DN4W+)tlpQ
zlZv8q)B-QVhtSkF`dkHh%xJ@Dqy|^o&s#1Cl-+4HnF_zj<-DBjF)OfqWq&Fc<JrFV
z`rU{y7HG21N&eZf0Awh9YP8)@5-BlhxK+N5u$973wYvxwfp4`(r8Lr~%n4i>ccD-H
zgbv1dQkmasla^llP-lo)A6iy=MrNc`wKA4?<spk&(as{-oV8*E9|6ixHkl-q&BXd^
z_k_6}Erh)hL}>q4y|ExGq&{^MF!y9SCQa79!-k3{@yp=}0F(Rc?9;A1B~EFE{E)Fd
zT*>~q!(XpCxv#AQWHCGM{*tm$6I(qERnxz;pI11v3!*;8=DoNaOO}RjBIit9j?#Ij
z>ac1Ax^fxeB4e1LnvEQ%RC*eXqLyi=x#jdqPL*<rFZ$LR0t;mxQybHEjA+q9)@$Hd
zBUjEQJT9+Jf!BU-W>FRLo}W|ZeY>ZmXxC7KaU{+DSig{9qL8bNu8K$O;&<>xDO9af
zOfoEl{Sy5;(v}mD2fgXRFr|l-Io+!V_)A3vhAZTRcPt4mldR>9-?2|J_V6pH!v+|l
zA*0jCbmD`*X-))v8FgWp6Lw9d{XD*SAPAGVi;v_*18yP`8!xLMZq#xZOfGUSDazU&
z;*-a8eo_fi{<f&NFVu24`If3{K@7JOjR|2^t5Hr<oxoJ%Ns?7o{MBOrwo1Mi+BCus
zg%#Z@i-iO$lS0YTB90)9*UAP8E{FSsp$HFK<fhjKA48E)N60;i$>O0q?~D_AAw*>A
z@oFbq09T9u?2L9*Lsx;Klu?Pw@!_OAwI&{~->q?Kh<L=8G{NZz3V#9YLyz<tm&-Xj
znD`6^k&lonlpyXSDw#h7;dvq(sNY_;A0Sml_1p2exc6*NyC+Ph9;cM031E{irynkI
zsFd)=zV&>8Jd!8ID3ZU6G`q)aV-Zr;#eskWP^R)>1hwj8W=>?S@C!Kz$DsezM-Z0J
zAeaRG?DsfsEETL!xuWyG+dTZ;Q}=;QsOA4sda-h|(>tpuHeR)7^~cekFKQMGt0mE{
z>09x&Mg^sAP~ixOe`EtRp5^?^0P2m;r!zoHwM)zlD>e~5L{wsa9qYi&6%KU?(-!?2
zFqNl_Iov>)TH-?3@JyZ_$6oc-MP~u!3jO5^=;99$=VIJ&k6(r)?;gDJKaQXNc#r+E
zhak#<kwl1t*>_8_-twro9W&<E<A;w%PNp}r1L&^>*EjX1qL^%m3XPtwSLXU3f!XT-
zB^xBQw_hwBu;~(nsA1*)6X6=bAY|ARP|G0<t}$G`F;dStyb5!x(CFWF{|x0nJ_r3{
zgA{D_H;xw11O=u;kk<U!mQ1{PHER#mcgR;iF=xxwGq<zkvbv=7!M{5LS~SVuBiwAV
zXS|vpx}hQaAY}@rD|h-X8paej&9a~K-SBfIX4tzws~UU<7Z>&AY~r`q!<<(OVAK11
zW&i9HirgnH4>y?3{+ew--SJE59}V-^BrQQ~PR!AxYNX(2Ef1rL?~{x->2Q`qIwv)f
zNa1_CTF1RD8CE*i5~e&_m_vs0daA3=86*wDTn+PV5(u%a8V(kGuU)&wDan}_cRqXU
zT1)4s{zV=9WVF+)Z#nQ2I;s(w0Bk*PMKu_Tokn@Gn^%=Xm5$4?jQPSk^l8^wXU4D{
z?;4e2?aAqWRf6SVjs8M*VP(bw`~shT2x6%1+_&9IZZD<7*>|R2$r2rMoT~^E1IM;G
zl`0*K?BwMyzdmB<CtP)=menbH&G9fm{^szvPhQ=JrG0Uw?9T!WVEkLC09HQNi*AA%
zq@AV}qCV|8xE>4jUb0~v3ox-dX}5a_qCQrABxiH@S{e$uSl-W{J|d=edPSIs{TxN=
zUVB)sMEXxr;&U~$DVl%Xv)&CGCu)$ZQ>bt?)W3@7QDC6&tUd=byHEb`|JJ_#gHZzf
zBY^Dx)4rP7SQ=aZClSHVPoU#dMBw`=oxe4ZZlG=H_X8EdU=%`U^W%lktxqjQw+OFq
zRVoltat*Nk@R3wbq2~UpIFB6S^Ll>%(p@NDX&0>eO?is;XQ>j96xoi2XvE>WmbCjU
zC6z$?6<TbbcBq4t<PybOV!SCyq*}Jp-Xb+I#kcjNRo5f`(1V*&H`I=6>?5INsjuh<
zPA1vbgR91(7;_#o5os7=`Vl;B=@Ez}OZ`?&bIV0TsDgG9ad@Dd4)uat=8rBU&dUo2
zt|FyYYB{^|yQ(!nnrfyhaAHGz%SjlHhE-ihpg8na?tbjh)FOsB0c(uu8Puy->C*~8
zO8SQceMMejr%t}!wSVXPywO7mg$QX}+;=GSY%Z-<&R@}37D&`QtgA9JEudzQNai@9
ze-TczxHO^Ltw(drrjai-GQm>(@Z+0)xq=n$!|SHN@KRO+X{jOg>bhT%qUFaz0(<{<
z;mr{)5KfS*8T(3xkJDbdDdd9v#aB)r9DsrTYlv$ExZ1#D8F__eCy*y*I}N(!VHxnt
z{z8>qZPvz@xaY0FoXW8xI>T>&O5mfnaaJe$d}0TG-LqZ!_r#pztV4<?=5BTCQjt9P
zjWp8m8AI^{dJooQfRlCb<qPOgMi+emT*1f;)OT-EIID|bTg9@BDV;&=mWV}|pr^`9
zM)1~E``f#2jgv=mz34%Ttfm!FS3bf#i-?bwwQ@^tkAq5;p+ET<S63t2_+=J)<m5JC
zR!X8=+!S&Iz8+BZ-((Aq(p5!{h9aT7Wj1%XcWUH;9kSyEW{r^wXQp8Cg~+Q`GJ{A+
zMn2FIcEjN!=-aiol9P(qqvLait3Zo3SRa?0+Ko-!&#N)i<!>b?&qYm3_<E!_8azA&
zGiunABO6g@eqW1`7?mOjyfm!|Pr>X9B35(r!%q0VAMSmh!u8mll>il4$LG5fO_j^W
z<bt1oGCyVa@-W4yB=Pqd3G&*px9uzBFZT@}3K2Wz5j*Y?{VV&f_d&@fa67EZkkhG^
z?2H-qMF&N`UxZ6g#+!q@1{3jr@gCQzsp((f{d#PvH(_^q67V&?_`cF7x;zHO|ISaH
zzh!Xs_u`RDPWZWckTuTtwV0|k7YT0ZOo6o<SVrQ?nP;xUopj|D=z+W`FT7vQ{-qOm
zb00mmRX};$liT&LkVfT*0Nm}T){E`)Y(nzyP9|oqc6J{2P9~QB!Mq3n@ljceSYtx*
zdH4>a5|=%(ig^2T2pg*&-oH0x_MRNOuxYq1!X`a_)`sfsLE6oM0e&V?()nOKy^z_S
zCp?3gOFp>%tMJ!MiRK{WhsfrHsS7;Kd~%)`6_{cSkCm3QE6hUztf@`Rfmi|~`S%mc
zwzNaR9!4pVIJ+*o@}_e@6)QMDH~q+gs}fwWf-Zg_p?{SF>EjZu2(If?dwk60?&*rw
ztC9oR7e`h=sicG<pM)C>of|=pAF?1Lfj>gU*$eOKeyuqq&#=AJOV&Z1y*C?m^I^in
zQrgC}R<!Mc@3+ixY#Ml{s|_?1OI-5xG~DR#*Q85?eHW8wA-j6O*s=;gp4LEP-Yki*
z)9c$FQS)kXrc}FnqQ)pcmIFR|u=uVPfS}*L+CD^(DnG1;Eiw_+ct~buz(byFz?+OC
zc~B#mlQIVHpJ5H^KHzaQbQg7NpF1sIOKM=J{pT(UDv_?q(=%aS%$H!XVZLk8<NzPU
z{KyL5o`}v9rc4mvr!9Hg(47gaATI^;C~4DN={V=e!~Lu1=J%Y|>V&PE)Ls-<6{3#3
zxGF0+N@&hlUL{+c{!K4U(LsTgvnL|{=^<j?PLu6?!d28t&qH?gt$i{>u-C+L&ZiC4
z^O_Ma=bplyn*aG|5BJUfRb<-BX7u;x4flVX5rF?4VgJ4(NNxYw9@;+J!@mW?%?)i$
zKT)zzEZpfo;bcG6xBk0F==U{IQPo6rf_bsnK>syDfSUbzG}}4gPy7#=nYa}|hDrJD
zx#!~B?`9KjIpC--CU(om{bAz@LtR}Nw}2Q1j$8)Kyq@kS@%Y+dL0p4Z;q3Ug1>)UR
zrs|e3CgR7u@5vcO>RAn@%0Hz=F<KL|A7rV^3Jg#na21*;fMW7()V;i0Z!9+wM2Shx
zHi3?`T*_QQ-8fh!7h73&JS0vaQ?ApBdo3w+=kIWxN2!75uv6T#R3Bkrv@ea-q(YvN
z)<GUL($KZ#>q*(i8dE<gfK)og5gI@Sr<~(AMLypf8D?N8Cx1`9fBhvBb->i>Gj@C6
z#f0BG)GXNDAmf!B^3?J)kllg{g}U;u&ic=+eTWr_Khed1<WlF<(uoa#-PXP21XoPu
zlsVBT%zY43Ir?>|6t(txVtq*l&VVsCg^WNHQpt|6?Do~K6hUqS#Tz1Hm}5$sOCx;j
zf5UIGJL`38%~f8Q>!Ury-D7&$JB1RZ429A>1T?1wmIBU4D;>WVg{YcZn`rg<EXPS`
zQ`h>>uWFI3XdHQ)Od7uepd|eX{gPU=5|RwcVip;uis{G(i!74din+>jjDZF<#FO6F
zu4g?ww~#{Bp+9GzlM-4OhD!?<;isBQnm2DKSkP(h-B+40wq;){60t+~wb4SdzN#Vs
zbSJ5-!!bv;&%9=-HDDc$&8eAEs6^@3YsqE<RJQ5^kEw$bUk=TI@8*%2_nW=29lGoC
z!~In|=^{fvzOoF`;9tj8_{<xfX5{XflG8lBa+4@VmC1PMrpjO&@HI9ca);(%L~d1l
zr%=c0xqaP$#m_Ey!;!qd^}PT4w!g)?-h1`5zpyeW?vC!6bFmVfE8hM`D2hyOEoRER
zA=2i{X}7bZ8~&6Q;I*9ci$8WzQ!qrW{!72s8u)h#J0!{<&W*52F#+LOxQh7&##Jm!
zm|L(m;q23;USm+uvrldJv#4b-=h@TR&~6{^_bN^#auu^nf2<IZ$p*dhu7-r<*T(?T
zWR695*vB{hXh!NA2V!J2ZbutEPRa|r7O*D}-dZpfT4ADWz(>rJ;4Rxg-`;E67y8z}
zd#ey>dgXa}>Cfuh@V2x#do9_!5S*`M5L;2D??pqQT@@dd(ce0PQ$83SU!=8+EYjus
z#^~p{m4`>P*MEteeZ1xhA^ybSv~fW|zWkd;0Wg}I+L=088vlpgze&UPlSYjC?%M<A
zOcYuqt)!eKUt*&gtuofkQny#9>Q#Km3N0Dn-uNv5RA_Xm^pi&n^61H@DMjF0tFV1;
zw3XvysIIC*^=qGwwFe?|Lb(qfZJ?9t2OxF&B%XeKGbKH(;b=%5pk>--L$!|U58X;r
zUZC~Iqd6iCMduNnk%opW)AQ>xBYWcS+`sN;(T5UooSB2%$qAhX<mj5-j^{`iHzE@p
zxl{C@pjNI_X<~@1#F&t3xM_oUDIj4JrZXKSg1UT>?{D{%PSQc>1YFOLyL_H+x4;5-
z*23iKH&+DFU&@!*cwas_rsLdl1=`K!M;UDi`(+At^+8uuGL?GjEiD<L8p^YmR&sdt
zbmZjUAqF9;TqwXC2<S~}&3?OLaQ(g-b<U7k>;TOr6sS0)$23*1-dF{#Ch(2NjIM#@
z8pvoKmWo-5NOc_yT7&9ca`)&HkOUeTmajB_-(+nCAfeonq0^}^T9bDSx}>?R26<jO
zZ?_Y!VaA%!;n4P-iaReQ@Y$bJPT(qK@ueVntd~zy6Xv%JXPjF+)gXeqi(IlOs-dk$
z+)L%?vYHS(frsS16d@nPrAb4Xlw*-A;fH)DUFjuDyk*3d+r;y3O)P~yT?3R@@}A?E
zl=|jFD7EfTAqCWfpT=%VrszmKzxVHGTmP2PqTnv-ZG>a2sospDl1#lr>_4p6OP85`
zhINaQ7Vh)dp2PDVv}WGEXbT3x;4)*4u+o`A154}vq(mf#bk+@o;JjZ;X+0^HpdBQN
zRQuE$rhQj7KW2i=fMx;t%M5g%C+?x=@^QaoeRu?XyxInUej!DKi~h8L`E5C_6AJP>
zdw7fJL6K{>0T0wZLHLt4`yMR`LJi@Yhh`vdU>P7=C(=Gw;P$2V4-ZI|P%po3!gP_x
zxXd?0*R%$wDh}t|MoKH~x;9SwjbaVK*yyr%@MKFqU^V^KK%Jr9RUI&LDMU;?#?TEY
z#mVm23?yINN1X3vE3C-M)(g)#)0)U*Q-Ic<MsV^d(6xg2>wCySQ|y{Uig?749%8wF
z27(pTIs0Q+Bv#xPSr72LzG2GZ2tbKtoUCDDt)(-_?(fXgkOQ?ocOM28me6fu9>lk8
zlggw1qSpH2x}hIQ(E=dwauf-8n*-db<7XbF9`9`H-t^-dokt;1(O<rS>j~p`v^*-9
zPKdI$d%c59w;!H}R(rYiEk$0EUW;b$!#2YO5E<(ZGXWyKCE1S+zHhSbU_u7i=&}*f
zw%<@D2j9=lXZ~EYflB4nB`vniQrV9@v>RamepeOTWHjg@(gOTB1BPXP1$pNVrQWCP
zqj(Qxg32X~@C@6&UkrmoT}GrTUdZDkWe>ke`1v&6xTU!9DeCLpIH^j=JrSHkGi~Z7
zak5Rh^iw8pM-1~_xXSS^?s(h(G#0Rm8h>`T&X^1Fc4+|pnbXco5k2!3Xv?b*Q?z`u
znFi}h7kg^L%MF;iaij^mbEP6N^pXF{nAT~XmURH5LsPC@uvuA|jXG_;?!_Ug;0v_f
z8@a*?r5sC<4nLb_o)B>6$>?2E1Ht!cL2K$fbsnb?-<;_*<B1Wiiwzt9o!0jaIy^3&
zx~^vd5lbkobPl!UVD4<t0bUV)Bq{OGWbhPpo_E<B&;|m6-M$~)2RV|sz(;gg-;+9(
zQC0h=ucC}Pat-SSDYYSv#$JqOX4%PAwRwNM9xhOZ*^OymE>LC<_?xH>;D6_$el&3g
zw|4!8^(E_RmLEPa(7v;QE%5osC|1`y=*B(;nn*U0gDg;WMuf5tYF~Am#`M6-nf#O^
z4vWUzBpE0Jy$f=Rj8#DX5Jmp*@_u~S$F&HN-jFJtWWITK*`Ll_Oc&HcYe0Xe2v%CV
z4CK=o7vvfftUOCz-i1guGZn#r(D&D4#tfhDI7`KNA-WDibNH&tx!k{SUtI*r<!C1!
zoZQTo1P3R@EGf-vy1qf&>o<$|?sPB#gdSXg{s6M{Pt?PF5;{>DS1p<-zj5A%q%X)B
z3jL<1J~4tE`VE(Y2fM7&Qk&QxHV&tVJfr~u>PHBNhlTwIvb4%o2Y$ES6$P!#;7(t(
zpE_ht85P%Whm3u{ZhO1Zartjnji?;qaT~mzCUW-V-Bmoi<Q$S!^qy%q`y_}r$MrOj
z76{ZZ=w+i<g}+1OZ9S3g{48uVe7^AutjQs@2uEDm#zGL^qTn^T2-)xEOK<cvxb#)I
zm<Zm|CioH$&(^uCY{t~$-%<*uA0VjGQD3uSPT_US<E62is8(fz97q@_vWk(sn$WcE
ziW!rPbX(`O>!W)n$0d7_{g{P2zt712eH<Vx-Hu*%)!s)t@Mwbl3_$zpyP<yDtGS%y
zl2hcmg3-vwdOn+rjS{Y{7h1@hcTuanOId$TvoW|Mz7&o{IW{QO+Nsb3TfAqgFPc3J
zXYAhvk5a*J=xL_~A6CYc4^BTyUvpDrpHqb*4Xjh6UO0}QC|@M-e96l-eb&yM&jqGD
zJA*ps;yQj$Q-}URyCtjXO9s!lPg=kGdlBy}zO>r!o``1HNPYmG_y!58m<ix;dYAv4
zC9ZOri?tMRs+x6Y+nbTV^f0-^``alwZ<PRm(R5wJj@z4aEoo={Qann_C@jkoUQNSx
zHPs?CBHn=v7E!yes-s>~WVecD`{cj+BP!$sOG<90cRc=p(7mAI!YTVH*&*>$qL_6V
zWc=~z#BX90jgsK!mSQb<0)7G(nQ_8Fw}Z(%nQoHepduMo2x_pS85i727|bQpd}*+@
z<YUkN>Bm2xA(&!c8hk$2RlQG^-#@Rc!2bb0ncLf&+n74oxc;Z*BKm(^EF2XLzgb~$
z|Ml}>+JzF%!~|WfJema+Pr_|<sb5wu5K@-vH2g%MekYJmF&Gio$qIO%pSNL0q&BHS
zv6xL?qEP~!Z&eN~qseq%<xMqz!!@69sxj2$MjMsITSbQ9_XnS&^g$fW8CNGK&}0N9
zj*?-@T0au+Yi*m)9A%yuh+$7|C{5j**3+c#nBa8S=CPOl)#-G@SZqk^MR&J)I$Rje
zd>E_I7x)F1^hIY9-q?yicjA|f7u8CR^l2Tg?bZjt?!}cJT*GKx7M%k*Iwy6+Tb_Bc
zf3{F!y=(dE&)>tIWQZgaEKApt1BoY2BwRSa0rKLpKn=nNPKoZZc#-0Cx6vNX44IA<
z^L0~vI^oVDrv<t-L|b!bGVG0Ni2Zz&knLUWZUiEoqM2K^33)E1LD0HTZx*f1aCEOJ
zL5>i}I;#}8M%YiFlJL6$6^|swtexdhfxmRVI#Pflc{QvJxq<1<QzG~>lO9Z|vtSc#
zV3Mjkf^Q0^+UuTvVaf9drj^a|qzXG)zwbtRWEAdMA|?FshBZ{dZeTiJOnME?%q0=r
z?9}0%@HcM{mJ*s}b-r0!XY95^?u^d~KRXu)rXEx#Rk0L?Y#2K1<y?o+`QbiqxmsI)
zzX^49GInV+wD~+kRimwMOHAL=a3~qa+CQ+%Nx>E)ZL;0hDx%lNtI1E&RP1P$j2Tp9
zJ}r&dz7tXf(GE4%)*F$`Re&S8G!fAX5YMg(K!6)SY~HT@ZRE=~(|bg3?qeKF<Rt=t
zXIm|K){n6Fdx&!Wt1{0R+8Xa*63ri#%)zQe;qOlSN@rufz0|2Ieb3NEO#y8C5j1+2
z<;m^{)??#UZ}Sh4Xo%pMDJJnhE|f|lmYNmOk(D)-!0|eENZJ+3;V*VHWNjgp`Phau
zNXHAV>}!TtI&yr_XE+)04HJ{#a^3+4HBr#3+525-fi*m|%CgylXk*55rLmZfUiftz
zc?+%&fNd$5O?k?ON)N+ha{bgR+M4m2pdt0sAoYq6EXx+p$M^C2YyHjhBhE_Qm+Eo1
z+=Mq#aKr(^&f!@fSClMpbTGCL{HCs1o*xrY!ut}|3E5)zjCp|JytD<TY8v1e>~`<Y
z!(&8Qj6NgiaQ3J3_lQC5ek_lN1bQ%(ut984tw!%kWOa5;GCo;gpFo&X&G_jUSj`AN
zy*Ml0V--abb%4R*GZWu)bzDN_nia-%xJ1Un)O$TN$u{$l;-#yhJJNK^itUAf`SJVh
ztD(?`7dc1m<~?JdQEOo+Au3?{({<w<KLZs8a!ygoVuV)RR=VuotFviQ@Ite*@BtQl
zez9^0A>rWkbfPA2Vu)qW8M%npCM!5_-7%zWoar(nVlC8kj2GwEW0sNa_U~!kS}TP1
z(}~g8Dg2BW%vY!R5i4Uw=r_0t^kS9Cw_4U?`{<lz3ESUZEWyS3THU~hx!A?ccy6q$
z(pEI8Nm<~j?1gZ<6#s>NU?7b9lt%rI96MAqx>I+<0oJ{c>ZWn(^HLt+<yPn@@Xt@&
z#IC-#6&MHz%ICH9-vcQB^};!u8af$U{P)5Ifc(1vfM69vg-=}WEl1xm0z!%yPTac!
zMg<bJg2TXSr4g2y)|R2FWB{pIR2cokr+F_3vwR{q!OQ*dVKduu!y(^~1|#A|+OrWJ
z=C)J$5-)+g#(J`M@GvcM-}AS04nK7~BPKQjqa`L#Ws-~SnHO7a>m;tFq~;==q4O=O
zYviWWG+72VHLHfkg}%W-^1{OA<+0oTSHUFaf({gp%ebq*eWMW4?7d$o7NmIkk;s~#
z&c-;)wW(A3#OUp_k*7dgSGq79NuKyIXVgyObzeP8kMDLl#nvcYCh+FXyEHmnI5RR-
zg4+h<=$P`%L&YAfnwWA&b*z1=>N3YC1rrojkh5_riM*x@AX9|YOJPXCOtq<Tr7M3g
zvj8#v-23ux<TN9~9IZv<gJ2rkjEW$PeQMg2*VU_GL=o)OH6Q%1ndm5Y>jq3Ad1cp!
z5z?3gYp{*e7Lum;Ld8*acwHS|;!>AR5TXGJ{d@|Tq^|R1y~32T%ST4dm~1n9tbvR+
z^>W-Vgti>`hovjsg_EtwR!SwSXK$e{3g<G(@TLNbpYhs${4nlx+Sk)g!9`_SxLWK=
z>kYhIMB>`29IZ@o2Zm7baRK%kgEs+O#NPXO*5z+}!=MD`PRBi=mS&a?J9hat`{O_@
zM4wBtbWn^fhKKe>^aQ*9D+>|4tn_a<V^^wxxjU(z;a`Hh6NdD)vTv8?B>;f)B)ksq
z!wTzsxlxSZ{Dnhe4B3sgX?l59T0Nj8I_JJ)Ak`hU>e)`C;)To}&qO@T9%2s7lW|+9
zM~rJu0iqFpRbo$wGpZJUqk8zAaRUHTqm7E6efbxsR6N?N{^w7O8UFKc{&&y;qlLY@
zvEhHQc>e!LJXMKuv6)OLAs4T3wAlFZ<ovhFjnw7yRd|-jYCrD*H1l|HQ3q3FldO8R
zwU5mm(xA9=X<B;hsJF?^sVL|&T4TbxZI3qgjG(fa)g-a|mrNE;_t?pZdE65Zy>c<#
z#HnH_@iPwh)7|x0z5s>mxBj;S<Lk2%eob^4O#;kGHCKWvb>Ap`xw*^f*hm3bd&2L6
zrclLtb&q3J<C`}C-DTMl`4}-}IAlKBk*x+IjebI*TA=QWMZ?EThHz=HOr$(|4!36;
z=WpH?F*BT5i4bV(7|cSJ!uL{31O0GtGMsRh)f7KSVlOT~g*Ix~p`sfEoQ*w`V(Mg6
zO%{UM@IfbpTmv^ltkp;^HAcD7_rlW_&-u+tD8o50UUxr0(u}&qkpXj?mIR&yBd}?2
zGde^Tj!18P_<mxdL$_B{x9je&(`|ZEF#peN`aY;RS}e+Jo>V=qs)KJWcv-|{)PzZP
z(av=SYoUV-m)HQ;AbCma!8~O$JEq2i^+XxBn68n0Zpbmjk`V5ES+d$%-%=k<FqBxv
zAD<X2l0q3E0iJUHIZ=^aR!G<JGx+Y?9hi^_Tp*6WnQa1Ibgu2(=&2L|{JDip{hLpG
z!JACM3gBWX;)tY>;K*c-MsS!Iv>P?S6v`WAw#SApr;Vq(b<MZ0Veevht9j#o_^*)*
z14>QbEIv`@yiZh$@V^i#!1e#uwOe>NnEsPV-T%YLtV?apZjB4YXX!f%7?^SusqGAB
zVYLz)=y5P7<Z2^hH7(QZvQgn$f@(JLM34Ko<Kv>1R5*WG$4P`Kgeh4urSHm#jO8@^
zJHR4ZdomW}k4<@XNZ7=3s`7-!6t%XBN&Ch>;ngOQF!=ON3t!VDFNJF|PBbjJ@c~Yj
zsX;Xz=dx|)z@QI&Mb86GZD6`B2@?vAZ%W3g8pFd`TeObH9G$0c-Ses2zS*>|Nocn#
zeVb@Bij;(o<^9pBiziaBQ#%ndK8gM?9ay~MiwGdEMh@*SVbaiwRVYS?hjk&Wn>e(1
zVLS3I+spVBL+q0?yp3c6cfAp1U_{S&gR!uH?Ti&P!Z1olL14l4$qC~<B*thfmB#s%
zCh3&FT`E?YUvW)4B`(dhnw3Q*Enm!Ri^L3u#o^YPs;1Il?!QE;T+OX@0khBP0?6k~
zz%0OFOJL-C)cLI|Ofx4*B?p8*jqry)?tqN@L4UHX1EB&XzTfytzy6H9aGWRFirDL;
zKJ$H?e5-d{H~3R^TrRef9>{{QYn7`W%2M+K@5^@ifmax-UNLvAe7H~FbZkzOfzr8D
zBqek{YXK{h;a{${V=X@!T6orNYTz5@xiIT9(d}Pr>tYRU9-6M7WPP_vl%z>aCsa{!
z=?vG)=M@5&9VLEaWc?YZJ0VifBIL^R%QL0B!BZzZ*x_f9BQD(vV(phI`NvsX)xonR
z#h33gSOS_*j3kFI36sUY@ZqEg!DWq8Z@RSdsy|+8yLGC*V;^o^?wy|PR{(cBFpdae
z8PVcDcrhEsn6U5N5F4hGH0zWy`b41zkYX}(CfshOnxXsUz|uY>b2tsW{<O4@Ok(Gl
z6hL&2Dm&{Gi8XfJUA(kl?>uf+?=<h#TYA4<x5TpRK%@K}nf&Qy>8{?w97F7aL<*8(
z9(xdP^=GL%{M9XBrOY%mr3sKIw=w~n3I8Sd#T*PD>Xx`>#lnVloCM~*^}&;YHLKr5
z;{3>(?n~11EgJ3)M*c8f(F3wPtVhOBwJDBVyS!s0sLgMbHDSYFueJHW#wetP-R^WQ
zOrxMp`4n~Nc1>wDd_6k(dyC`J=8cfY_M`?OnhU8+eOo3|9?9TX8V%sGdN4oU@HC?&
ziISy6IKX&(_#VFkw*-26J#rdkcV@@Ee9^K6`p>h@L9}G;;AeMe|19Nyn{i}mXZRm5
zWmn}sJ0MdCrR(CmO?*M{fTris&k-n1IiwV)u<~YXVhixdF3egBjZiVQ5B>%N<IKja
z?TJhe`^~&#^}u{$SZd*=q{H@VSJ8zq`wZo+9z2w`86lWa+8Nidh>0#Qr5iAjD@TIi
zUyAUB4!>24!&W%-z?7k^?ycWy+s1r-fjnBcaUeP%o7r~zMK9>WXNz(@Qk4|#^h#g#
zYtw~9tc+Sv=Ht!T((TaA6FX^trAP`JICZ&fOoSa}g;QPV@%ETNlkdCjPcljOC%e4M
z#7M{{7nT|5QCZAoBi<*h+iur%nlyJjSlv{^wFtt_mgFCay$iKvTNZ^*D^K4ETVy3_
zih(j#+(!D$Nr~oyO1qxgvdytWbsO^C!Yz>pxu4?kgmC9G{YDu+%>&cmvzymAh!L$H
z1~Dj;=}B(I)%EglBkpNu>fP<VDNS@qFhlIN2DHOOkc4|)KVU$6Vqgap<UJD|#Q2)i
z8Yl06&94y4UHvb@-mx*Wb!iuk)g9aE*y<!5+qP}ndSaU$+qSKaZQHg_<~nEZ{jPb|
z-0REu1vT!fy6OVhnLmD>q|=I*wQ+IKsEiok?cDm^Aftcbn;(IIH?Er*7VUfNE#NxG
zoA&$6Sx(M%Rydr;qwa%==7izz?pVe^w!AH9T8i0qlm2NX`fodeFKKZu|6UzA(%-wX
zz9wN9i2wD-|A#l;%-Yz-?yEljul!%9cpHna5qNO(RRPL|pvB1wrXa#H1+(0rAJekj
zBfe0$aWXQ@vL$ZKFv9EcI2xHl`vBedD6{SL_yW;_{RcB;vEON#&ocnYFFPrhPmCf}
zxZaet%gVy8ZZ<AS{SRO4T`It?K_vz#Wzx_|@vvhr9o0>wz>n27R^TBrfq#(ii}^cT
zB47E?w2Ey%N2K0d{&rZ@wmSLz{>V`;h{O|TSu@?LuuLeGH}<w_-2^X|z!3j>qnk53
zZ_vv9*kx05sl^v*51nwhb&3q$9u?bz^RiThI8b*EA|~L}VwY+NcJ-BxO|Z#7JxxZB
zsjV7nH40vF^Mf~nQ@E-eB(~1{IPS59R=^K;4KFk_3Fnt<T~-R6-}gRfWpG6-RZMQG
zU(#k5sWfsy!zYMUlY}^f9&<H3WnSQgQ!3NFte~%ABPe4JLhhcMGk%~mAU%jlzJmrv
zzl@YI8<CYoUmgn3u|fd9wB$8)lw`wTLJGt(^N%>}F-fLc1tgV*F`{w}Ww*5#(J#k(
zax(C%j`gtgOVZ#X7}oT5sJC(C7TOe#QSxrNHcEb26ta!MwAJ+my%$HuX6lL%(+ZX4
z%=17<iN@7Y!))$8nWpxgMS4`11<6=9gD8nFsG*M`kj8@k5&i}6l)lXYmXedeRt<_z
zM}Yk;F^V2|Fw9x}^VXUBBg-K!%0N*;>iOo|J`Cv*UZpXPrG$X<l3*_T1XB-1lrnnq
zsTxF?{wcm`V%L;ihL<&CMx0i@+y{5>KzVO?&4JyRx;+EZcwUBAAwgt+>Zc)7RdLWP
zNY}u?+>f7uEm@%ed~3{CE_f9dnW!6@_X1J0oWiy+xoL|!71IPS4e;#Lt_gPQmqJsD
z-6G$@Q}wsgRJnJu-<%5Tqi7XpmWzPDy^Mt?#zU!JQj-ewf8g8xMYlC~a&R=W{(mC|
z0E*E6xB(`rp8j=1|Gen(8%+{3*JQg`K)X)@7fOSrc!63e*U|hU+_oC$p^H*lSDynm
zamD0h$V#RNncQ4BH{M<0Z_23y2z88NWtuV3Yw@6!qdSev{v_y9b4sWU+B-q-4jU4M
zMF0ZqVvU5PsgK<{DJD88_zaYwBoLLuifRk5$iTQo97qL!6uMh0ghZ6WQ`6oEN|W^%
zGKMNplDH{}4cv}KADVcJcvnw(LPA2aMvmby>D@EJ5bH486jwrzBj-A64rn}qbEZCu
z*gs$Fw}HDQ|Exs=m(r*FBK5(l{UjVspaS$dD8-bEqtYn|SGx7WV8)Ho5UHFE!d&Cn
z^dm>xilLNCq%u^1Nh!$7=j<t3Oy>@&O~S;&h(ReCRt~%3eG?|}U)P>WKf?gV?=FoO
zE+!VTc4E&kWtbj1teVsj26O?Wq(giRUWzvTCO?k#%M@FFkNYWLTspVO<ltfYzX2@D
zQ9MCL$LcZObeAK?FT*!?X~rFrZuX-050_8KpPEE!d;rEqjhR3E7sg{Zh4ClPAPV$z
zw{5?Vn+Bx<UKl(P(m(b{Euw|1K0a*SK3G~^KNi*QUf#x^Nig)iJ<HK2Cv%%7H`9A0
z_t^|x2koI(fesM!<mAt8P&C0Vo&mj{!!zRvOYt&dHOr!fenK9HkE!sQMXJh_AU-8R
z2;#((T|oA-X!MIvrS}8yE>KE{Vz#WK?;{)u-)?5afvo#LeN1I5s}}V9G;l$7;5eM;
z8_y{;KS@N4+j26x7tlg4Q%VJ;cXCHiOUJfVeby5s-fw%D!NEA`XD6IPi~z$cq-9`d
zl}f=H(~KCIM7`Od-t?`k{+CPY-;S!KA%olY%i8VwVpZ@rI_^6a6ccBen=8*JI|VJ6
z-oWCGVkIDkdS?Sg>AYN4zWiDR=A?$-QhyNeq??{P)8X_jVNgJsx+c?DJq1#1&Pr?~
z_yno26&ss&WcSgLLmIW_paHt?M*{l~(GS1blLK>CbofPO=^pNnjO)^eD`@w|DHNC0
zU1K)GW5n7Yr=-BjU`l(G!Le;J;e`9zj3FKM+Yd8qZtWx}S_$B$>h-v+_1Jf+PoS4^
z@gtS~Zqz3)Yq;bRT<rnm<wmu~?qJ?3j}E-KqMpUdCmtz~0wp|n4gzo!=Ny!?Q|cb!
zi~PNA(tj5~8ooNJXcm;$Wo2HgCtGxfc78KctL41@#02NMXl~w(ayzOK+Rf!s6wVOR
zSogXq2ljO}Ey1}d;7}Z$>%r~KT@X`uXtFosWu<+q3C7_)_=AHKT>lAPL7$yhl*Lc(
z@&({?Au*MZ`e|kgHv&}6K#Zv*jR#)Je5w22Tu;+vk8j$3*bJeJ12qh3ki(!ohJtY}
z#-{t!v%{IDL?tJ2sw?15b87H*D=vv=Fp~&df!$^?C6-qyJ_(^H9pgm)$}Q6w_oAR=
zL6W{>UM4CQfExiWn0*Ox{VBnoN4RlgL244r*l!vUTv5P*NdU0VuAvZl!CDQJn>UXV
z3+^jy<gnK!YVxQZgP;pKyb5%&yQZiYVPwa78r=<^%+_arg1v}I#Juyj(&lyI_+IaE
z1#CpdXvwOW=lpZ*vwor0=Dm?Sh51jrNf07>i@j@OXK1;pBk~>YQNbYbE?)N=O#UnE
zx3mV>S1o2BzIZ@QytR^^QNy>|%Q9+5>n1;)?}5zogF*ZoXL+0y`mATBDrovz<cUBu
z@14G_{Ej+)ZaDdYM|!4Io^<K+=mqeuZ$P|O`2N_;WVJsw1jhad(7r7ooL9CENUhM}
zUadm*=ZYimR4*+7=1{r%8zka{x3$&u6Y#fx&L_ts!dL=2ST8{3+jXB!g}UPso5W6q
zb3e>qi5A8$65(=Zgyj|^CMv-?^jqY_CiLO02fTOz9?<Kncr$^E@t$;l%oCz=c?`n%
zck&<oy<uI+V2H$Y=2$mYgD(~qKppNhyC5*M7*LNv;8k;yA1&G~s$HO{q5R|!WkuIX
zfs$}Nf;R!}^t>aGTtDH~Tc-zx;JQ!-u&^SqdGCM8CxT65+%*qN$(5Lo<B?Lhxk_20
z1;KR&X2GH_W7(sx@}hGAUY?GtqO+U_m|V7?^ju35S@~Fx9-|9^g5;DR6pz#qVIrRC
zqBC)2hFj&IC~NYjNVqe3+}z&`Kbreigh-Kd&Z__}ihCG2sawGZp{Nq+Q@;;h&I_Rf
zpK@zg(PW|XQuuegRJyGi%`?^D3cO8#7niesTfI&EOxR@OP_)}EBOruwk7_c-McXXA
z>Kw9aZ+p2=DI!8~vfAjRl;CK?%G*?4W)HSt-J$?*JFxsUl1PvFP5b0(_0Xa!^19K(
z4=xWtvz|?Miv)*@-@!oBYsD`~SFJLE@d;R$Usk-7Ftoy|)oWZ)NLhKTY&)Hio;IjE
zF|}IpBB}lL=K=P2U@G2K(%^dMdwPt)uf&@Os$_16D!wS|rohues@qE7Y>X+RSNhN<
z)N}5F7VA21A#6urr%VWcT*OTdW=FE%;V=HrCi44|(A$fYgbW0c{@v>QTto%i7@tO!
z;=T9li|M++X(ruuM)q{v${n37B$G!P8Sa#|X_g*?BG0&5k~dZ8tMtd;G&iWf80wC`
zwj=zngV#UCN?I5h{Y!51f9wB&ae6j?A!l8;RGj%E)d;3VQt`4tO6H_FPP6r9ukw_8
z-H5Ub!h;FQrysAD@y*9^Rv(7z9gi<9x6Jh>ngno3N_N7rI`rad!KrMS&ZrtZP|`3x
z%|>cM5<$RTV9gSX6XAe-Y<1Dhx1oI}_e8*_dn94M9gQG9gl~MnER#`Z+S>4pBv-C}
zRc5UjV(}D0v-FLT(`G{VOqptsR{8sk)5r78ta@ssDs)XjABiIQA@2%~F_4K*<IU8c
zRooDEMFt<4P;zsctE_9|irACn+rk9YxlMbP2e^%C-NhFHQYrxRRApQ4ndd;^s(do<
zs9zm$-p^f2PC>32DUsAqSB#W9=)&;Z+idU8i~Mb{v`kCBm2s{l%M*3s5zk~>;wqDo
zs8+;C)2wg{zEw}4-f(p|*KhD}LsT$+*_m~C&8`uajj&F!kuVF;M}mfBo3k}bfQ)9A
zhcP0^0Q&>ga5#Vn`$HJ&Qy28e0p9U`_?aD0=NVKW$??d3p`Np6yw_;Zs!(m|W`CIB
zCFW~*zZN*-i%WI~nny9AU;Q-X44f^{(jVqzTOJ*ZV*fOxVB^XSJMD>es5yKbs=Wts
z@WOWuPeP7KO3t?;h0^z2(Snw&+~r?bsVUggbPl@&!W)1b?D_kOd+dK-p_=Z&7;(N<
zDB>^p^&eG~mS)aImS2eu|7FWgRQMPE`jzHTE=rX~OOl)^i&U?qKq22+O4*PM7)Nu{
z6J5jl;;EjMo&YW}K*T~CmAw2P5608zloTOo1g_d~q<VViPy#7ZPClwe)~K^+M&__M
z8G8pQwI083Gq{7AgqqS{c~0KS)10PO%;PN(04kGq_{XB#Ss0(4o3y<|JH6HF*aSYb
z`llyWD5DA$cnYJ(x1Et+YDh5wQjFLpwyi3*ORP+u$gk$KJCk)CBi+Q58l0AJ!@Cum
ze%v+=<w?Wu>qR6arml4TZFiV^)O<1Qz7r|Me22gmvhf1Wo`G&J+h=^LqG=cE(ysO!
z(~ylLZ9qzO9tyo;gIo!vL2_M!eJcCf=7|jQ;Y(8E%Th&b{SWuCuw-}u>uILMt#=!5
zVXkfkk|y4<2@_+u0v1MzC?A4-U4-@#z9R&6|9(di*p=Qo-b^Z3#5AbI2eEGd@R6vK
zxrm9xvOt{@4Ws+vl?gQWbVFsI#BzQG;FaX(e#<--(_VQ6b@Fr19f*w&je2SAUo&yd
z_=Jk;g#@l$QPz=58hG>oX;UjXrJWIT2q$qLB}e#AvnOgt!_^<3_FiaGa1Le#nSg}W
zpi?y^>uMjs{o}{LdT6%9jfp~wE0;~xgSosAx@3vB7L@Hu=TUuj@=1DQ=#*ZaFgY%6
zPJ8i2q$VyIdWOw-h}}lPk-MP*zL(W>u@mU|V~I4_s_rnJW=Aw&1OMMk&sYZx>c>~v
zk$r{Y{$uHV(SY>~^o;)tw;bqyq)oU=kUK@KIqSgCRm#<G7*n77G115pmvjR}ivq%l
zDTx#3uwOP^P$<;m646nPxT$pr;~!tv99;I)e&mxP39ormAZq#IMIuo!A_)?X3QDi7
zQ_A$;geOtY@Uw?z&v7a>1pFd=3RVZ7-7Z)V!YIJ8=Q7Jem!yQMQaY?oFdLZ^h>`!3
z6E1Fc#NX=#U3$uARxgLkv`q`B&;u}q$$xRxzoiOM`_9jt=I>8Zc3it!Mi~t<i2$Wq
z_?Zwpv_fP8kyMmj?Uwn^r(N|;1E(O1bqtIF(WgnjgL`joY-B=w<SKJ9x6W!5=8cxP
z+_<?v#BaXAXUbY!ji0=q`m2;RjS|(1W-v3M)Pw#erQSVemvk-Vlg<E~RF(Pf?#sD8
z9lq{*I2Cb5Z%fE(qJ7ImFW}Zjr*A5BCW^J7m_gaIkxv+CS0t!-o@exYJUK_ANv>}c
zp@Cr_{zEDdM1KHNbATx-!<Wixh}$ZX_<i|Lc2W|KwW>OwdN|fNZ@8=w62YUmyWxPV
z_q2TB#~=rzU%`*bmIG?QK0sje8NNToC&~9<VH#QiJ73M#@KZ(DUpeq?&@bsUvW_&+
zO7bmgs`}R(1&rkC`@{72aE;U}5<iet>+Dbv;#`t<ULJ@pe0+RbDx>_zekU(kikAQi
zKb>`A;$0Z`^MLuMn7Uh(5HzRfmF$T>U#LRSd?Mb;y;g-KjdnPIbDNRq3?y|nGH|M%
zU|}aqntBdNaT+s6<~ZM@Xyi7+q8%&GZ2T7K51!g|L<lzVavl0^<=di7xo24qsX*RH
z0^E&g|JMD?HNF_CAaMjW`#i!+7b5A3;)i#xWhz#ALn#=tUysYm$}{1=?2mdg5^e$n
zB$*bE>eN+#8v^p;LT|gq%7X}g`Y$OpV(u;o5RHEYK%%;l*}Ez>mf;*?4*D;g;83MS
zUcg541&}jTML?n(&$v&v(%3^!C)(*iW5@6=8*+!<G;;<^^G~i~ioLj1JhpFzeiCIc
zO*GoOTgQzA;)#EEq;Rm8*YmG2_@S6$UezJLkUNJlXbdQzu|8l~9&wGI3z(8oPIxiN
z%kj1R*&cWcqNXRhLn$36e<E?F)2TKR=}p7kbdEf@!y*G7F5t4g-tuwwu`Z{8Y!NAq
zT?Fi`rjIfRJZKQIpr3rZw60y5rA1j7-}J2}Cl`dYY{V^mY;&C8;JenuwOw_1G-!G5
zEraNnWi4PqxKTJQP4U@@g2@#Z#5p(;7UQRnX3FDKdyQM|<88UpD6KL&!z<_lDX(*%
zt2uiY_7bj|MhDxC^Y$fN!J32z$G+6mZ*!`(D1>}--f{4Cn?JARv$zdV0*c=SL_DjU
zS?wOwiCjldfxZ))V2%wzcPi1@RErz<`RDh?!^8uwe;M3tDOO@vhHb`VJpAr99Lrls
zD%ZJvoQCYnP3-w4`BS|n;jp3P5@8Q0d$*NfcX>$z=VFmZCv{m?kG*C}yJsbIYjG6I
z3jI`!DzYxV$mEmxr9#@eN9xqT{J|@>7RqGf_!wokaMt@B?`Y3zSwyxS_gk*;s!V$i
z-q-!5ODA$X+4$Es=y{%915bkq{Pz|Sn@uZCs#h98%OGc;M=b7;9QTfCc`^^Wqj$_@
z5wcFP^RbuxSEDJ*%ybbIV^K?^S<$uU{9+gPOQmrGPv$0?>t=ZV%ZkXkVkN9f&itKR
zfe!2D=$UT@g+0woP!GhGRwf;_4*>{+*8NfdvbFQi)8!+be}9EjJ>IbmzFy(Rf6{IL
zg(F#+xf)p7I2rzz0ifZ3%eJ?W3=?EUv>TIqd=SAzSKO>-2>}vKkV9X3y{Ht?Tyc3j
zzc02_FbYL%Qe8f%aCW3<=ID^aFDeUKl-BrmteOW6*+&vLXPJ4^dbD;yvBA+el|LKf
zWz1c!ihhP+-y~w0!cb@A1c|`DHJj(oM9Mp*qDCm2pJhpq_<0e9`BlhMf^B;II9d!6
zkI}ne2STUu;sjtt*7}Kta@P)(Z%J`?g@EK=&(RkGJX_s)y1q0NM>G_~LcvFI*3-*}
z&lnY0ff$^bGxE6_6At-3GGy`R&S?qCm5qbMMey}o%&7Kpx8)Suc7hDrQtG%}Szf=h
zigYTueb9VABczMeV>rR(t|`uVH>8lV*kgYQ8`n24Zvgg&Mt{{jhDLTj3%R>JT!Z8@
zFEMN{Ah2oqZ`Lo*rrVVH#dFScM-tO47}OhsOQ~7oS1tDR^d#npbbUM=Be4~NO>kh1
zr;%rlNXn`U!E!e#mbWq^`tNGL(lB$!Dd%-^lZE+Q$oooRF6m5TYWv>WhV0@hi8>G>
z3;t$e+XWn_hR2bI0t*&Og{c}DzC0zko`4@4HROL8%9ZcYQv>xf5*S*Dg#Q*yZqE$~
zzc6JHpppKp-VryrQVJO10?KY9j*gYV76|Y)+F~&6v;IZkg<|Uu{Lrhd`h&~+<@dCD
z&S_C<YH^m|9*q$8O*Y@drC+g(c0n!?k|J~N$q~SP(-=E-{dL2MxDB}AnS>n}H^%Zi
z5U}2<vefx2+?xfhS`{7BVRwuBc?CGM<qzl|yh4cZsxKnoDhpxLnYHn}gN^rwGS}kX
zEu9~pdcS{A4mApr1~K4)EXbs%0AZh}w(!i6N?Y_$?)PpKShd~mVU<E<aUm$jDDmMA
zqYMFj&3dX<>K|BIiewm9e2a#a@nS;*x4%8S<TPYQ5ofv8sMC6@UXV04NAE?>e4>fi
zhfX7gt?bY*^dhODe>X+Kp^K79x}HkhM0l<edehn3nN@fb?{}{NBYDis)6>YWKNs<0
zcblMiK9P3$o(eQ!X<!=HnNq)p%bdX2r+W%eJ)+{nxNEGI;K$8UfzQ`v12I!@B)*>o
zNeTY5BOq0A%7uF5F)KhRKf{vs?s*bo3?Xh$<*Fb;W*0XbamdsgsgD17CStl8<w)dE
zZZp+$4g7Wq{bWbpE%5sq#yA*)o_|)j5hVa?Wv_42$wt0zc)0)UC*eGpzdL8om!1ny
zn8f-hUGD)%HHWtd>v!aahu*8xrH3YPFi^~+QNS*Co9_?|OCvho@9Ke>dPuG-d$8Ht
zm|C@p2`6QGczFL&r(Jo$Q?{BY-xkfsI-IBd=Tn{qF&3&*8?{x3QsJSyIAi%<K?~zi
z!3Yd#mq6c~kT)04MYxk|rky$DL%JRSY_vBv*VRV*2N{(mm%S3a@zB0)!Nv6$T~Eu%
zhutLR5Kn@2liHw*!L#A8d%?KFff5zEo2)=mcOuMI0$fS7sGSTrtf%YdnB|m7<A4je
zVK!V79cJAiRW*Cr{}NW|BX3QU*;0^3$h^99uKDi97!$N5jhkkdHHMXXEchHCX>c&n
zQ=q~bVQcet43PzljFG+Te|TzVry9M^$OX*1c9J9SwOEAyh62sT-?s8&j`@ANx{k+e
z!&!S8Bm^7p&F_Uh??ILy)U$5jE6tp<#h=0KTBSQgEvl%}_v|6qPPFu={feGP=#n$x
zmTgwm&Xk&zuNa3%FfW7B5fvTa*$H%D6qnt!k$b|n)D<|O0!-m+)iFelQDl2lZRh;S
zW8@j8RnulaUr3>X3ML-X#bG_V4yPc!T}1ekgxm0z&N<~}28Q$f;MwJ~1LyCAhub8*
zsiUteDe_l-+kd>2{>y*E>c7SfC;orFmVc#{J0&0luizcdGeC@;M>wk?5U&F*B7P~Z
zm*w-tXMTPE$|@%|5_3+km=++6J@h=}aJem2ULhPO#M_7%>7w^|T2b<&FT*54?x7?-
z=K{2g$dxDsidL42i_L{d8<mi9l$lv4S`+_SvDbq`vJ2P(u1H<6G_uOr_X9o123VJK
zzi3<hcp@}VFRMdO_0swYit$lNLJxf-h9cCGJ(#_D$s<j6jW}$0<Ww8o<@x&~m21t5
zmni?*fV0y}!NO?1Vb^r9k4b9q+O>Jzng<YTsJT2&cIe)^4X!|%!Bu!WI!Eygs;xQc
z-TY$aof43CX-j`|i0^V)^y{*_UxV`dH^*<vgjYUc5#aJQdbv0RR~0`FGsS7RIm{yz
z=MJdo1lSk&+7l0qWVayJ$%ol79!*DJIYQ;z2Nha!{LrDMf{q$`j_#2qOUj2xBq{(4
zP7z)Kw)WrQ#d%TMn)&AFTOG+N0-|LgjD<x+rIu`w8san>i;v@cb*~y^H7HPMY7Yn@
zmiWRK6+Dd+G>zkwGcHDy34cbMc!K9>*J_ntr-iA7r7s<U+@x5YfhQ!^%SPmdyFR!A
zAGe8CU1x({O#As5m<)k#0aKO~=<fk$SMrdS)T{?an`NH?n$pFt$nQtMVF3?YbPT7c
z+NMw050+3^^)WZR`vKsN`>-vjj+0Ej#@QBn>&aV#(i!q=cQccdb*2#n#o3>d195D%
z?=JyCq+9+r2P+PuEn@?%x}+7VJ;xlR!zSqM46bbU`)ffDNZXfg+mr6vt+N0rhc)Fk
zj=6Fi=7an?NO{XwTacBT`<$6E*zrEyE%8CEj;3>*Ap9psXY#nVE1nC;?Ab<lz*=V|
zF-*voPNFLs61UR0;hHB|cRZRM<cKp{nchMc<|f#n#P3m)B%tT>j}Paa#GXaIy5JBM
zNd#u7jZU*9F40z>q&gVfLScZY(CNBfuhTa&h_n1n-oTL`Kqkrfc4@GV&6<N9uJ;av
z-!44qttWtyC|anC5g16#RyH26A5h)Pm?W3Zf#hlmJp9cw0esE_Q(<%}w0$~@^x5Z`
zoXoqw`g-=xPKMm>JpJnUSbY3W$`H(ZQU<=SdW~PD&napE5O-0?L{<O=Gu1x4%Y3{_
zIxRb1F!lR^T3^;lOBu<k3|Au%v1jdn`~=i#5ZLt**a;l%igtHS{VW`#koA4n&6qn)
zJaq6jWqHqO&p<ToNnvJWFoLrXdABc#Sid8iE;YVixA%6pbU2Ql?lnHFR@mafzprp3
zLjCk!(xD_n+G1Yo+l&X?#xOO*(5;{I$W<kssY@_9SdZuZ{i>J`RoQ;|1qa1_(Wd^9
zjc4WNV5{fw->CBiN}GS3)V&{d?C8TX;@AKv5A@&lz*2+xIYyE$3Q+R;4}}RNgyZ6q
zYCgTlIFzZ$)bJ026(80g*Il|4wdch(WJxPh)`i<WyvhW_l!*!@d{M>Qave|-)s<qb
z6_qHUAs0E+O83*|Ee#U6UGpRNd++eX!0r&j2=^k<{HktK&HOJ4xFquy24<-nl-&VX
zQn}<SP{!Xm+ds>XFV`8Nd!=g8l0VhgHrEeicrNtd*f-QQOzM`syJX5-c5Tgk>+aYu
za#2+ID`P`(I5PXQ$%}aP;XE(gR4dV*&;7hZSafLc@h;KJNUn9Vd2mcLtpydbQ)*gS
z>Tu~~$F-*$gJEc>ej8P`-Zy>>hCc%m*^2Ea$xPRM(7R!mXh2A8dYB=qPeF@onPZB9
z%Nbyich=RI*r`EBBEHe;^dJm5!w3T--x)ZaLO}cyN2|8B#O{xmL%m_yddtF<&}?Xp
zg^o=XYy_L7O-YYSdE8A*RkuyMV<5#Fm53Q}@2`Qi^5Dt8;UnwQmtW4IYxxAM0c=wc
zRs&gdo<oXwB@6^ae=(V97k8k9p+4g+GJ6ad{sGK!vE}>-s*^}+g=JJ$IE^DP471OJ
zk=z+f%IX@7Cz$Ue?u!bAfUVbXjcYAaqHtzw?f0-yw^u_jGYR)k`VPdTu$kCj&A_A&
zA)=ASbRBPK;j5dNJg+j$0LKq-oIl30i%4v>?10aZ!;*d+FTOVv<?PveKG=UI^|+=V
zt>~^xX-uM*xqJhtMWw8|AWh8%hbD@v)N8e~md$7g6aXKUuT1o<D=58_a3{(AmWUFV
zkR{YIz#g}dnCGFg*M&EV@*$qHdMsJ;#jgo2p2kQ&g0G2sEzZuB@|*)S?>6&Lc^)B9
z;tSA;H?0<`OSBq<dV#%&J|keA6n<LWox5z|&VD!iAWR8jX_8sCixyt6CaALp5@CrG
zMeEWcviOC(H-$Uc?gbLn#uM!sR2K`Iw!i-I6feAL3Oz~tnSAs&<JWGU0AJ6q8$Acw
z*WU#A2VE?km7BAfp^?ph*{vcmEEHA;AKCx|a8|YQ47E}O@vgR<W&-3=cI;6Ml0Ea~
z`3()|9T(={5951Xn|10PBm(b&TIX(E@MF>Zv@rt!5Tw<JdsSyL1}~4eO--aWyt*5z
zqK=K!U~a~<smDGbqfI)0C)ZBY^d-VQ%z#*^g8{aH1%WHj@(`$>dtVuqGR=^ZV<A_+
zA3TmF_|DUBV;()+;k_G0b}+Qglg4tk_gj<=%Ayte{ry3oQ(gt>5vpB<uVA0sL_*D3
z=YUuN1H+o$Z?`w`Zd{+pQV2sUj%P54k&vKXFo#7&MIHiK#vzYBY_5d8&X6nXS4ZzB
zvcY{kv%okJI=14MH*4u$?d*{H&=XVDJ9o%0Bg+tH=Q*4F!GvZ-3~_7vx=nh$Qq*Qk
zz5;GIEeW%Hdn7)(<Rw4NNk!qiF^Adi$(zRjeAEa(kZ6SfBuPV-cNBNf-(ngBzD?-!
zf!w7Gu*eGOChcw?jZa_Qnci^|&He!2Y{xw76(mOEvTx^0aNXEiAGQnInqp>@vd&ua
zM2Ds++-SW-;Cngpy<n92vxzoFPi?JCeu*cVXNP22?CklXyclkB`~$+ho<Q84P0A`9
z07ufo(dZ|Dsf<<KWpIRgV@fdjBEGl{p=eHwV$Ad2rUlI9bYueW(Z4xwt&7=gpozTc
z44sT6s&N+UK1UKmTyf5r(OBlicu06*pW+gT#H>oOx($}Utv<ja&x-pVPiUHQ5{#J=
zv0HivQAD`yOFUoJyyhaE*t8p_!Z=k9P{p8#ux@rdt%e0xIe~z+3~`mJirGnUB~>~u
z5#`4$Mz|L^<zXZ$*&Yp$;EP~XdlmIVpR%k#P>nFw<2`LW3>c`FldlJ>+lqA^<Kwl8
z_;T`;jG3XfRcZz9CnmABbp0*3QVe2uEyP~gJJ$QFNzT@ZoBi{(H)EV`&9vqTVBpM*
zvrg;vc>4C{?z+oXx6Js0+X*0nkL_kY8yG*joxd+prS;Aj4=$P>?@N!_K0be2$e`i*
zMBJc0lrTXFwP~VN%cR?6s`e*u<c8~_PP&CWbtmqmIrw=9=zAnl6ne;-0!I}Wxf84i
zu{6C-S~oIE;>%PLBo7i+<FDff6n{@nD-{E-PTWnuqPXJU`MvC~tT#<JMYWUo5lyj;
z0K^K1Y^{Le;9(!lylY#!iAhD?1);d<5%J14IZ+J7KaGf4VwpaImN-P{2wQ<t1XJEg
z89%oJTnmM5af$fIz6Fk~Z3dz8lv#q}_kC%qB4|=R1U~x@Cb>O}tjbyyV7l}-3L*X%
z_x5rJ(i~;&BqF~df8KlWT^?NwR@M~3e@7}zO@GtNX4o&qe~7tr%uFJ}mk&<a{tPJG
z&{P(#dPc<Khh%Y?BGMXY!5E`8Mi(doQ{^Z){)sM+^1LLXXK;3qmDgp;lV`w6R}kVr
zpaAD{=E}>^H@)Yo@OdX*0Cfn_(Y0DSWltHN|8wXQ!=Jl%k!vYvEgMCK1`SjiK)jTs
zKa2HoZKSWg1sZGrEmNZHF7@?=9RMa3xIoweV&1S2kqJ1da3^Yr29jLvQgmg=ylO7o
z47*b{&p@9kZmUa-yx4f2ntDLiG6EJA%X!8P>et>l*sx9kXS4PJK)lTtthVY<=vwIu
zLzhvQxo->H)n0f*q3R$dmfjtR;H=HlJR@XpQKzn9@(NTF8KxYe*mrw&C*;)af#471
z3>xz#0#O%@9lRG;UKF%A6mjWsaR$Cl;R5n7CQPIB@X17ML9|9rZA5vyd%2n2Q+ive
z?;}RQyG{9>-cAY)=yykwbY`klm@XSHh>Iogj|g*|DHU#`Ml=L0epv_|vkT+6eb+bi
zyKkE=6`H6w{B0bL6o4(sf~Rp=|JK1|{U%#2EmH6TmRzYEbpOzwV<uS7t?B7&cVFGN
zZl!;mwq;Z{VnUrBazEV`)HkZ(29oJyka+7^!n*e)U}gLV&_*19?$1S4shEhaZe~y}
zUqg1f`IBTG&fljn0EqMarAtz&YO=saY2Lk2(Tz)m!+c)WCaK;tmP0y^ZG6(twoSDo
zJxA8a%B<&k=cEd}d_=fUOKLd0j(8styL~JWUrtnxUdSEoMw7u_8Tc_o?FrE*dknPG
zFTz6B3gGh&&^(4v!%v`!NgdI?k{aMy*OK{oaO}*Vf8JB90WiHjTGOfwCHq9*E5B8$
z8I_m8t55zGjM6aQOL{`3F&E5=F9lmWw0ee^?+(q<O<BBp>9=v>Iv(N<W7Vp&Le!zq
zR)$VPEFmz&OfD{ckF0wr`g4amPs_9zuAtbmxF`|}aBi~s*-NKjnLH?;=Z;H_{3piQ
zcaP!i*D%MLz0F&_^7Q8M)*T{DJfBxM`=&Mbrp17vqO|AC^4F+*<o7mOshmv%tg}J|
zH?bDWf}3}jWsKZ@%^&XC#i5$zE!uZuxfR8YGZ(yZ$P#g(mE8)-RI#}RRl3TH+}MDw
ztCwa4c=GB_1kZhINDer_szI-;6=h0sDM$H=R(#`22~MmArtn`#h}8<L8uA3qi`at^
zf387pO~vz>|5*Bg`rX0FLnW^&AlYO3^KV2vn!(eSkgt*1<O@XokBj$T0IIdV9^k(<
zcttsz*{^wdvWnG7Qo3=+5*dkH9SxR2u%HJNM0<h8KHkD2VJUXETqxpmg3WBU9@IAr
zXVWFC>-7RhO?gF?{>aTCPhu;DR-%H}`4E|T@t1{#pmD=8fvbU$oDE_p0J7eFERta*
zqNDq^y&S;_Hfuv$#`=vblfc|z#%8f8-sO04vI)Uo58fMzrlR767^aUPrz){}*?uHT
zxokcGGBh=`BpzKp<RQQ<fz6!f%fjeFCe9~Hpu}EN=6NPbNP<D?Ni3#aSKGrN%0U!1
zjBr3YT*rU`U%((NlC()2!1a28@`EnJ2d&RVpTjje>6VsHzC?XHYLJG=FcR|M2{x@*
zMD7nx2#w!vYLDH}@?m3OK|qLxeJ6cu$=fZkeqfzAbqr8OyJnSQj!>pG2sdJsN2bXf
zO2J$ZTi@_SftT3h9}u@BDr@Y#sd{COEED^lEyto`6jkxV<L7}iK;8Xm_)l%+s-+7*
zcQFHBm|I8L0q0REl88iQ^ZmLQa2NDPFG5`OHb2d8vsi({NprQ}m0o?c67?XIc~ndf
zBAhef#Zp*&g{bRhyRnmINWzxwi$RfO2D&uqk?X`Eh1J&%HNd}AQ3(aIrjOW>3A4cl
zA@J~e`sMpj>vWS%0SNi_*iTpcyELMYsp)~ts<2XAf6OTt37O1j&v+;^v^t0+9Wf^=
zvn!4n$M$zoDV9-ZryC3%xzUwTu07=DSYuKRGBDNH`ab*ANyH;W{L>Mqh*o~T3JR?1
zR^MavdNKQ#ca_H8VH05TJht{3PUP}LjC;dlMpE~@TyurY0Q?Kd9r9cej@9v;b{p_H
zR@|G-pmQEN;eQ9A3JaLHH0!)qrU5rQkZgK%{jJ1RQx+KNzObdZFUzL?WNrR0XLDOq
z8%G;`Cj$$kzucDp|2i!I*kAC}zdm2$pr|jp>6a1Mcbw*27Hw<nyBw!Q3u<#7j6X)P
z&Kf957E?$@4m7bKVs7y)&yR3Ub%s<@Wn^LO4qPU}gyRW~2n?eYqR+eumtPq*FXhj=
z2o{MTuu7?v(DU!|gXY&g=4k2#7Et3AqLQ#bK)Z{8Y3h{$?wnVU{`7i--F@0>4dZ>F
zO{3t)Pq$=&$F{7Fi|j_qx`x^Sh<5)o!wdO5C_EHpx~)Z*TNmVN4><Agj2LR)ahu1g
z<lFJx*Tt)`v3wBo=rK|ci@M?QNS?miFs;w~^ZV%2WZ-*~3Mk)ib^=w(J=9OexCiPE
zW<>$o;7rnhs7{sVSGUPN&=8D(`|qN7_6EN{pp<)jZ_6bqLQBOprBamP6qlOp(?34v
zppHoo0V_|254Kdf9Pay&a~TX5xOUJ0LbHbso&63?%Z-gLHQlK-3$}+5XJO8u$OGzS
z#doqGvTF?c3`M5I29-VEkZc6dq-js7`G?{8ehPU2=EXPoJ4Nf`az2Ak7*M^finuqF
zey&vLzDYqLdmD;q>Yk}1D-fdHp5nXZB|M@2c?iVWUbc0!M3a8xSQp@4_7<k73|h3y
z)h+a^Obwc+*ktFcu_nw_yCLU!$`FUhbPEJB%wG@8o-jrng!k0Q1G!T7zPiA>q?giL
zt&2v0OGcA{dyL-^-7slaalRum1}Xv5Yvab_BnJZ0h|<8`LzJ>jHB*DV4@AV`xB+XZ
zAl~{S#{4vl-*Up#@6uIS(#`VKJm6P-CQ^)m-;E}jqCrykSqPFBZP0G_-!alf*B|(X
zC*Hb0hp`ux?nVSCSBVf8Eywzvb1IpG>eU<pi<y@$LIYNnB;_9$Ypp}jhW4a6*Ca3{
zrFyh4l#k{t_}8PYlezk^<CNiRwlr!ZpWvN#X)EBRzgV0gPnMi*25Xs!659|=7Opv5
zNG-Ck$_yY0H;$s^*!kBV11RPcl;v9}Y+mH;cj?Vs3RBX!hh90?#Yy87ItG+vJh-a>
zkjdiZ;N--``nimS6~L(IahxjdDe!SK-&IOP*9<s5fSugZpxk&@;*YEQv2fw^s@ynn
zcjU7M2)7k#UmTjfkKwluCQb`g2l|$8N2Z7PEUSVt#hPib+=9Elcd4VZDi5qWy~?<H
zDC~3+zJ0c%<CCnCW+W;ion+mYYGQx+KRss15M7X_yS7AD0h+5zNY<D=oXi%M?kCkY
zBQjo(9=f<}U>9|MvcYQpde_$cX;wLF60#6epXvhfa^vX9CZqVl^Y6DqU~;n#p|95W
z>#Ozs_^0*O*2do2=1Y19_-`BzCxxD<FA`eEEvm9QbY8)f#Wbm0e!fzEF?PjVhh^g*
zALz>Og@eRxr7xGbtG)<HFJPI!eqMH9*43iP{}96^OU@PKqa2Y}3|W4)N(>(_iz8Oz
z*GAwg!?0lob4I`c{3#6$(->xx*h&2p^b!?Xk2;Duz9T#wLEe0iGbF}lhE<PVi&QFg
z<m_*Q<rXUznWfxz&i--TEZ{)Ap5=;(Dn38X2qRzv4G6wn|8bS%q;i<Oyh{&zZ4byo
z%P%F@;RJl#?e9<c2UIipB99j9s^NDZC^HNk6Z(8cR~lskrf~-1knj{FzW+)%<OYdF
z_j^eod@eYi&L*a$`A*u**koH2oOZx4N+4tcAwsc&)rKxz`3nR~nW((Q0g{;~+eR_x
zr<A#UK7E+@wD|q>g?AKngJe1tttgrf2nN&(SCxyA5}y@<Zz}ggy_OALedm*%K@g}A
zA=9jI?;?LR06NI<CSshAhlnee-lt{%ZExJug&3pOqY##akwzCx<RvT^_gxyL*`i{W
zw(IBQLaOR`c7=BnzuekxP%X#2RfSb9=8Ol2a_1&j`9hi#*(bLFnPH<lcb$+JM2X!E
zi$?h!`pm)+zwtTT4{AK|jmi_NI#nX~_dv?$eCWjkz%S_~tf0!%yma#3Lmg<Fqj`)Z
zcn+bw#=NF5$Z21`@0pZ7pUu@(a$T(6_?J-|p?KNJmLPgGNkFf(lrK5~Ia?c38Qm+v
zh_G1`U@&+;bv0FaTnJOYL0>k5zIBu)DK^ydPpZF%-~P>;Oy$nh*ZHeR0Wtr#Dz(?Q
z(zDkyH2ZHbjF-ayn$oWN0+7*KW{6#(63k7#$WI_BE$OUk*7~JjrExMc8cbIJhv_I)
z>mzRn8RwU;7j1xBA*D!Q5pOjGk<w^B;b|;MAyw<rkvT&Ecgdo0y90r}iEgxRg~G~*
zKtEznV7V~shTnqu05-7lixhgk93(WO0T8HJf}mW2g&s|cS7&E>D1XW@E4TP>L)`dH
zEom~9H)j?S0T9PsO#_i-U4-b`bLTeQVJPK9psMPt4@iKsK_gmKmsh(=AX9vq%x#wc
zzMH+U2UVaZGMv0A?W<x<qI(9CW4p|;%6^@gGIX*HJCFzCi~(r9M4ysxL|CHk3Tff%
z98)NWC6!7fIZ3X2ORuGcm0-^!Izw0?-<i)*Rv3D3C~t#2Z)XpocuTmiwNj-1H|5mS
z4%}^*9!~&r5CaE33I2pBRz94kF5(;&a+Rb)?Luz70QK{W^NRsv###hCHS1G6Zs^&$
zEAT;hwVmzr97fBi(e9s&sfx3W3@C}c8U!e#{krQA4fc}Dz`-Q%=Z||*qJ(thQ-Y*n
z-=a>1C8oRW+li7V8p@nEL@h%9&S4e$cGt&D2snWD^RpuwTa}a*PJb59&x!B${%``J
zu%P%<MUuD*!tu`ojMXSnOh|Z9VGrS@e#YA^v&oO05oaYM$WV7k2pCzs*%UO0vyVla
zG%{N-W)8BERl3o}1&$0(@~Y5l&yvH?nJg`?d?QjCyWO^R2M~%SC-HNQ*A7J6A*~Dx
z+tdJVPSW>Iv_#Lu@iy8Io$ixHPKqrStA^6R84*XN?JT=Liy$M`)K3E!`O?$9qi_Lz
zpkEvt-c~eo#>c#xh`+~Q2v{zY$9!>r1;2)>e?;o-jST-1J^yPS1N^i6$19G{VvQal
z@bU$<7k${!f^14+{34a#K~<C>pgx%xqk_53IhN*U(r&=i=SEHY=~*iYJM?(kbLM7q
zIaYWS5q+b8#x;49l97dW)Mc8xNtF(6>6)5h#J+F?O&8?*l$D+X%_QJNua`CBZ1FK|
zJo@bL9ahH<g;5ITFqJr}FX6&#lSfD|EE_k{70b0o@-nGFn@8LG`N0zr=3vNl>H0fg
z%6DNO%bqhd+*D8#*2MJ>JaPo3k^R8rF9tE1XH1>ghrD^UT1kn~QN)93n{b$XxNYGN
z(m5+JvGV5P)T$lTOh*8hC3XX3X|$}>>xybYod6$_I|HJOq$^u*%;oiL2+?OAPL}Be
z7=@-$EgH%JxKUlON~r$w3Y^0h%|RKjFuECr?c+i><<&bw3;cpUgA^Bo%rg|-4ocEf
z@R_*D7ILF1n#$TI66AuvjVbK<r6TOXLT7P>ukN`O&F;<#G7%ut*&L+DN%eN<u`Wta
zW7sn5@FzHlbAlw|1To56i=q0HBx|d<agJxm2WMT^zc*UMgSEl_FOs(p^50#J{)efN
zgN>85;Xl)K{>S`%t}<=C%7XGWe<R>B4u_BpsRb86e9z;(jma7J$zKiMg{~JJvLu!x
zDrpv&25ffZ6U#Sp6k%8EtKP?m&b1^T;%Und<re2N?qwwH^EP`7K>9btfr~M!q0|zO
zKPj~&C0|D7ro@)=5jP;VSR-Ehbc4&G0+>r!%$ce|wWbQ>ks$2KX$RD8gUE$p@nM7f
zdk98L7WR}ENJj0|>dHo&e;wPJ8)R{L1of3>3j=X&bdu5wJEBM4(co1!#Z6J~#;jYi
zQm`UB8roR&glMj6#B60w#UYCOpca8M-1sJ2h2<^;mAi7_n)HkN#G-Z#U8x(b0Z6Nt
z8A&L2b75Y(SD8@m-a8KH&GU)k?kJ*vk~A3CX6H(7pu6#JSaXI)aSxk@I$oe^RrrjZ
zM6=ZRFj6xE%~3X;{$Mt-3Ts@;79PdQw?&v*q|tmi9bHtZn$fY=j>nXnwz>9fWhk7v
zJ$B_x@0)dQ>yQX?(CN;oAkbd30$eV+jLz!WVlOACBp=_8{z{Iiit<$5J23Es71E_D
z2(w7xxlz6K<RXmrvrB<lVB!_H8RQRLx&&{Qst!YoP(vb8P(_&-ZH|(@OITZ?kfrG%
zwBI4SHuQlQpCvWx11E+xMX|q!8`y@X|HFb7@4AlDE)d@E4Ix!?H13tj6+l7Bw7E>p
zdtd6A&*n&Bm==c|DQPu9`;gXXF0ay(A3=p=^NVnGiz(O93a|D^_va{Si%M<f&hQnl
zFf0ehVLucoV3+nP;i$cHNV+y{u=8R2iPq`IPb6u(3x-qnNsjN-LrY2vQVcM)U=8YS
zF6#Y7Fg{C9F&L|;sZJ)rxPY`u4MYxsl}RD0nl=M$Qs~TF2i{3SS*Sc|u{*1zZ`xQ#
zWy>ee@MX{i@pbnB#)ov7GTM?8N71K0A&Ai5-Ux5JKxi`bS$;1%p~SBul@w}p_U@8C
ztO@)6p0b%PgD~g7MvBd{26RS&Se<Q+Wj;fLIIWEzTG+UN0$raI`T?YBIh*0UCvml5
zT&9j>h1XA@C~IfT$w>_)Z?lKU@%sDMy#+Ccq>`RDV`m5O5Sg-<73!NrqP4xA(|)2w
zA<4lgii%`qRM8&8G-VUh><gGZ-d|MN68P#Ey#BB&@<6Vy9*swve>~oM18IOlTenI=
z=~{I4e(c(+IYnaC-3Is;%n6SA#P1X;lc;$fDMfQJTU~g?-=l`b^wm$gpif)s`?OFu
z1O*qo>dg3E9^NJL%3js`*Wqp%yQHjlTeB|#(+seKoI9hj>94f_tCmC=u%hj~Hm^1D
zHLbllkbzm*+^KNf1)d|&_9Amlqg3m7AwGA#60S0Q*0Qm4n*f?vKG}j%{^(+3cg2Sr
z(Ot^ffSqI6Sqj8JVL>ClCk^bP1D%fwg=;_Pt>-%JwVY@w%^pB<e<uAh8?p4*{5*;J
zQJ=y<vlRh};29qL&Slg!6s&QG>`hxf&d#ip@zc6(9vp541nb*!pQoG6k(y@|kLBU!
z{b~h0q~@c}pEH2W66gwN&(QY0AYD@zKYNbSfCL&~>2!Q5%L(-|FN&Qh_=dK^EZBgB
z2FjvhCG*r${@(J?m)bYhf)I|KEu3rTfV%MVC^aFg+R@ePd$goQV8(+#&)=X5Vjes#
zy02+(@9VYv_qO;y{ofpH&5TTpEdL`;@qb{7$p7tE9%9AkOnc+Ji52&oUmhx7);%vK
zU$|i~l(#X0+NLq#!(-?dn^;qUx4ivCI$Pe)IJKNe5(v)a%uL3CZY-ci<ruy%{>k?8
zt$g0kUZe~ob~0S2_#*drLs*$|mdrzvuw;^FRZ&*ImGXBg&fjieT~DKF=f<djk7|-{
z`bWbnxnCEUBayn>=XMi5#m0LNQUcEe5Xa_MG(2Gi9(NuaZyV*&_4m7bJsjQX%nZvy
zKd5NBCGrq$`Kn_g0+JfwN_|t@Cjd#YBR^#xJK25mJgJY-8fj5bHw1uYQ7&zv21PZt
zCU074dv*d2mQ#eI@6it*Sgj&`>QK{+b_ecC&T*%o7G|}pyxyF=QY2hri<E7fTEz7R
zHsjl~Xlk1FnvuF9Org~D`iw~)`S0Kmy5s(1?ccqjrhOeu5pF|A&>qTCSb*O`2K&BF
zd~<C1OP46}jChrL5uQN<cj6S&JNR`j9YN1kRu4LrG}M%>CnHAQ#5$W+#{oVJxDQDZ
z;C`x7FQl;h`m_{|e$ts*Cdyw)nXr_v9j8Cc(&q;sMC9P*%gzd`;NT={Y$ock95|Ck
zG;Iq!M|$y!dM-Y?scEyQPyvbC$~(7eF(|kipK89D%usJN#8#T0@yeG6!azb6$hdzb
zdtfiXS%g5B9`!I(c94ZnCd<-`Q5+wVzf+_-=w{qAhh^Y~9b(sOO)T$gH0w35rnH>m
zV05hw`&39uJ2$-hKmJWzgM>hAKK)gQ)?W$-Lcl*Q$PSKrUls-ShF?k1{|PtyKlQk-
zqV=WH{%YG*1ntcE$^sh?ru__l3w+;+wWdggfae)7LZe#bbA{#2{2D&JlJgD4lGcd5
zmZm6pd1D`M0aqJ&7MTa~nE6XSuZc<5WX7dUuZjU<<b%B}x)h^hB;|i~aS9lNltM;<
zf+IqW(dQ01CKmZbBt5<6sYEbL0#{zDBI5=IQk?-m|8Y479qf5`x&(wb<AH>@4#==~
zC3;wvwPh9`HyS#nUKELqni;uR2He>%d)v&cNQfSQE`132JXXg~6nWWUe6~^TWn2=Y
z*5C2U`aS*27;wSP8U=H<K0ZoAP!Ekk)xryC_JbRK3{y9Hl^gh3G}>RLz?nG|@=vK_
zq-gM5X%Ry`l78}<;0Px(>VM+=#%SSwJdRJhJ_JTgQcOzc`<xBxZYdU?`EAtFRT<{g
z>vSFhpyQoUJ-^>dN#`yzIu|Ke4AVvDp8D^P`rxdo>A7qv?|ah|L2lFaeE;yvstNaI
z(CJ{~&Js8Rugr~Scqk#UjNFnhR!z)C81Huv*lHC&JBuGl<FPuS1_RrTjEA;RY=2Tt
zt?B?-Jc=AswHo9OvmbY+%|vmHU6<=Y23sHn90d*@1va%<Ftsby{(*kb-2$4XHp6A5
zx25Of$uY=xK)Yp*j$O4H*oNhm_fpn~lXelQzMF~L^`Q=VUtg(*KY-3ugwr8x@4ebo
z!*DVR*A#_e2EjsW??5UGz*?ZXbDDB@aw+(2b$k+)0~r#@QX5ONs<tLOUYXz{SY2TU
zD872XU#B+9%EVh;J5d~>m0GfT21>o5r8FVIQ6>uLYpj=pkB5Vy_<1K_ZgqDf0ae9f
zy`%NDG)swcC^+NS;h0tixA2n-$cS;hp=WAqYma3x0<lq2usVjxMq~AqVqaypc8wfG
z>i!W;_%oT(v4jMpB85NEv@Ux~1N_Pd;7CNl!pc4>_?)umD84>RN+<@8-aQ@dvn9gN
zUfF?!-%d4wM)ljWJ(KY}S~N1vE`2xxEtGESJk;W#T}JL)RCW^!r`CIft0N=QU<ez>
zbPrUgpI-S2PO=K<sS2-0bSbtkC6HOAVGVfl)@e+MNZ~gkucf?)M)vqU@>6~pfW;Q`
z<}x!KZ(114GA3aC407nl4OI#I@$1i!h(TRo%)3fn>FvCUvjIGm4;Mc(u2aT^2cy{%
zN*C=ppO*?bdr0|tCe9<*l;YRjM8tUv!aozKG;M&am7zq{OjxNMSiRNg5e(njn}cxr
z{)cBV{805^9t1y;Vk*XR6(damAcaM&b;tlDK6WQ62=96|rX1AN`hj3RqTKWuD~FjW
z;diEN&a$+jpGg)|zkIYE*kF*=5Wal9oFx}-+<CQuNJezskvkM0>}m{{SH(%#Vj;E@
z{{>5+S7QNu)Q>{y4Hfjt%m0V4cVN#1O1DH~qhi~(lZtISsi0!pU#yC4+qP|+72Ect
z@0mMux}Q1S|6%X_?)5HFOut{RBI-Gck-5^}R)th|{=5J?WhLZ6+@c4Jo0Wk5F<baq
z6E1Lb+DpwId%{dw(DN(DbuLyM^g5j#_@vI_<mtDj+M=5Vwmlry{f$BC3Zcj(?NhIa
zbz8?1RQiqur+6h(cXazFijO6NI-cP=hoK<E&{vY1UZE;KmQ_Emfk>T=)Zxctgv!vJ
zFuY`Y0vyH_#B?UnNjfV)cYE4ZN#kR_vflvhnV&RZaN4BCEu{C80QK=-RyHAn*`TxE
z*f#$A?ff72ir>$slewvb!GFr{Dpa10O8D+k1su_&xl<WKo9&{kuFejyr5b0mzB2Q~
zA?V~36;2k8U_WeI`_aJ;MGpPcZ8^$v^x(29MZ@<00ft5`87CxJs?}HgG~A@rj0mA=
zfC?c<q_G@}7>uPy5<k3d2Oe7IyAluZaCG%b2Y3O*AHmGsMzD>Xl8R<~m--nuC}wlh
z0TP!e<jR@B#nUG*)Oo!U3PPd|VB`)dCH)8@Xz}=iv^!BM$qaHp$fGi|5h=#AS%2|A
z5|eWr^wyv`QWi=)@Vaz>Ky_HldmkzCl;A5BDkhG^%uWpD>&cpA`ByQiZV@oiBRfUm
z5pyBi1Mao_4Mg(id=J-sichVKN4Y6I0AS)xBtaJN4JO^N@#Vq|?YTGMzg>RYN)Brp
zR?A?h7o8jw{?>VKW}&Z~uyv7mjUZz9nHDZN2#50KD&thciQ9>31NI)!i3DKLYLP1r
zF|}&>oX?yv{Nv!CmRs3k(VrDZAl+c@<YE69-{~R=lyQ5PY@)xefP!)ArsR@@4+xVO
zCw(_(VZT=klKQatBoO<_$k()g?_DV5E?Mu4?B1#e)d+lzNw5SzB;^rN$+2l3yIMD3
z{dME1JwffJO)-v5HJW9!xaX4<@Gl+~$!yYcy6>~%@~wUTuduiOM-C9c(dqYpJ}mz`
z@3%@{uw{gltgPd4oROq1ne6m*QqG?hmQlp7N1{Pgh8R4q?#$)mibsi|i8jB7(K}Y;
zr|a})e|x-{qC;*BVNxS<hk#GtAAc5+vj;Bio9lXHPJp5gD^#}<YcgG5?GGJ1a#VlV
z=?c||5wPU_y*-B=&8pv;l2L^s*{HpPyiOinn_;zK*;VLw>Ce+4;4m?mkz%Pz2gbZf
zKt!<&jpn1xA~CFUxlxgeI|qN+B$?yh`CLr<Cs#jylQ}ffj5-M`_kH#LK8c!5e}O5D
z$U%-kN(6+~dxfa!5Q6%<8f-CY?fGZimvJ0&%u{yS4Hvsd(tvWgKJd(z%IB|6h}!=2
zy(6bb1!JRGLnd_>M5PNAvCNYRj7$awl~{!0?1sMDeJkJYduGzvVyMCwj+y(pP)m`o
zIO~R&JzA^F1AKNCz4Jiu)hkNjd1OhR3bi$OF$DM&<nPoq67}&7!RdhAItd{&rH1tx
zuIMu?#`q-0JO_N9h_5p%ei)8Hf{nUapexl0)#br-9}Y-5oS4I>3t6zb6GG?W%pj8J
zDyI}GoR%TW<cTZxYF_cBpr~PGeDdCFb3Lfy^h`3;NHI_DU!5()cz8Tfc|AW|Dj%PM
z7y@*1wWmTC!Lece?@|^XE+yisqx$KbMw>~ouXU5ADyn#E=-wOus-CPogkNak4P`*P
z0jcagizH^SRdJWoxwFB~Vi$UiJ4DU5tlDWc>lwNCK=qwc`;i11pO2q^-2bG|Ko&_M
z2D#{g>r~2RP7op@98?53lGhX^Pc{-**8@mz&pXc1ybEgex8ac%GJv9sOf%^IVN$3r
z0otK5jam=*B!EXSPYYNEqr1EYG7?qoG96Wk9d2|};IO=1MbZ*B<|f26-;Ma25IVbX
zGi_=^vighK>&b%oI))0<n3$6mTjG?6)o6wOIXJF`e~@0s(?72|miLQMKd@PS{0Bf!
z91kMg<rUYOVs9kcq<1_B))U`1*;W3I5x>WlC=k@k>1HXVg@rM!%l*xUmhikyQ7xem
zHGGuG@A9lLfccv04E0=E({Sn3R0;gVWB)T~NwL4k<*1MF07-xE2`qoCY_~DLV6g3X
z1vu+E38^S@%ETm3KP$(6zOdfjaVVhq!N<SlMGpM<rBK_lOMh#KbO*~)1nX5uTD<&t
zadZ)9j&D|rNjCNcyD`3y%e#G>#QI`Mgs(#F5lYvys{L_=4Run|3C~kud``>h6fd>s
zcy=#uFBNW0=od_U$=CF*$|IY}n!?$pm%np@))0$%KdCtzS6;w2Ce+25?;xP4l+WBc
zxYIn~&P9JZnh3}Qq9LVojDtdJz3R<*?rqXxBF__MGmMRE;eKG?nSDY3^*UWd<MdeP
zX_bRu$dsji4EM@`8f}6`^Pnv2PhDpCR&Ro%eg)uP5U3#iXdmY{0wwwmCHwETX23ty
z-~Hn)GO{)LPjo|~vi_e8MqIBaWiW#<y~?CH3e6|V4RrJ<C8=tLPGZf-v^Y%3-7WmW
z?M}(}D+kzgj>!pI@_1+icnlb*bIzc@aDiB+5zD?#Ux)zb-X~Ksg`$>YB!hCOM9@A*
zUp*2N1~3f2J)Wp|MN+b7KcsQDAw*AGrio}erjM7Go9mdL-T*M@A2mv4Vfhjd1*S$6
z+_AWKBkf;J*5I;eil|q8JOtb>LQ$|WFcD_q?W<V>4N$&AkJu(mKJP}Tb|W!k)PJBU
zZV-OvNzMY0Lc#RiBa4|#phZaaGFL2ja(@y@Iph=%{vsh-p*4-#YJ_Lur3JUeL5%v^
z3H0tAhDd~1eVKTXmgO3o_vxQ6_?33dLo#oYXn_OTs1#9zOcJZgU5K2`2iz<{8D~!M
z7>>LgrxQaGv=z^5-+A=D*?HCX%l=YN`%Fk0h%^Mi@|%XDhHB6bC2)D`*Kyxo%ph7i
z91R$DFAnX3<?bzW6Dfvqhgyr;nz`HN@sIPC>arv}OOfNsvA~nqW=cdMMxY>sBy#;9
ziOK4TMyb$qDHS%2toig2o4Rc5MxEx9tVD|sb5Tq3b6q3n3nB#lh_hnBred;-A(na(
zZtdNG00Bm1P#^*i)Y9>&t^6HPv&=>s12hp==}vY|dm1IRswrdBfK@RQ%jWRm64}lr
z1~+=D8VgZ)pATJy92)?ychs=PcN7Gc1#7p^$q}=#;F6ct%b6!G!{G|Ra$|)%0IyrE
z#2tKh9AEAj(4G19&nx6Z0GAHT)Ax{+L-;p3C8yv2$>W)+wr;ccU)qfZC{nVh4e(dv
z6^G`~S@<7Le@Hift_1f;Bupl?2^8$gHoskE=2kTwu<G)yAOpCbxEv*7_ce2O<P!@?
z6gx4zSi}F&0y9j+kxJ+4b<6Hqy@+DaE@(1gM+%K5_zxdUUd(Z=I2M!iP&~iyTt1RN
ze|*~Ltu%Gcb7#wuzkT>znB!NDz79a)^G(edHtx~-q@N^bK(-n)C{bQ;FpOo+nA~B7
zj_8Z4SHWKWMFb%5JWlut)_X4X^4r=~roN9C6F3{|8f20}u3<CpAslkY(r(IqU-6wb
zSk|YOJq2`R<VO&LBnxt-nHDiavh`ac67stQvJL&KYRaPMj=wdNEzD&NptO<4B;e!g
zCn8$HE;W)Q^`G(P)_o83e<p+mj1&rI&Ltfv5-enxmH^g8!*vy&_zpJ_^jN=!BfI_2
z>LieeMMnjl&<Uqt;KV3fbUjMe(n?XUr=?R6T=z%M6pS;X%%ARVKab=Qr7>ni(#)UP
zq+d$lSt&W=fvabZDe3q2?Bd<e{G0S2W~mK8!kX#om|f4&p<Jdd7y?v(Cy|2q<?qCN
z$<mA#?gH#eRzNR1oaeP05O;+6F7}VlBznMd>IH(pk4lx-y6q}O+rVKU>sp+lFf3q}
zg6gbVfQpaoWz=+^67RKkOp$J68dI?+?Wa+Twwz5B#+}S1oIi!D`=Dmtu$95i$AHL$
z^&^pUTND|Qdq3uW%v@Vd7ifUK*2>i562p&cx&tg5dt)O~OqLrYzvp#%*l#;uyDUW3
zc3xI_5Tm2XHt1+D<+=^EsmUWypVzI$-Hf35h0+Yw&w4L-lZtYg>c*Kk?azqbs=xLJ
z0|`Lwf92u2iRg4&gXwj~i$7Wt=C0ue8Pgd7?4uu32U+|F5{h>)c#2K(zlZX2^w>v^
z_W)jSkFs>aJyyV)<4-`D3mUn#k)qfJT(HF_7cEHyi15~i)Qklh>P53I;+1kZc~nRf
z<(e=h2!6X2P_844+Mk0|=MuaeqMP>UlCg%8%cl-Z>z&n~s#8kaGjCRgTNgL0EQjaw
z#!z%5zG0L2;c3NfKSuf;HJl@aBed1+bpROPsX(9Tt@Eg^I?Jb_-~8`{CnRU)wv(4W
zC2^!wb>WcKXJ~X(|D8tG9c$S$#$athS;Ct60+V`&<_h0uY3nd$!;(d%(HSIXzZ7oH
zWkGictW9(|H;8R?EE6lQoq+5i7%OnogbIP<VkFS1BQD*&d<WQOw+}jeZksS;86cYn
z++wET882&?JT>1yXD_U#sb_+ysl{<+l|v0yPQCxI22Jx6F03nHy=4?17HN#R?23G{
z5h6c`ZdtWeMtkO08L4nVqsgZy{$vXlyQvbuiX0iv{kvPD{XI(X{ID9U-6Z6468Od$
zXGv1TfXE>d>@R<PpTJJ=UZvceFMwO${sRZxIB*MF2?L=O(qS8R6!+Q@B~|=FMGnW9
zBJcrGzuGu@ZNi<%u8tY@tU2W%L3Yyo<*UR9gQK-L0*Ubv({Qw@3_CDgKAPb2vW>HS
z>uPMDYOpPn&S`ORQOiAjd=;$Uc1yGlTs6>bKyhU0X<*%hzw&Mw#gct%2~cF>DO#Pb
z3}C-8A`w~y%($7#CUk1*c5hdlYY>hE-`h<oOv~L8K-QJ?a#bE0n{`hbt}QwWRBdOL
z`i+&&G#lv^8H@dE*)2PNS!v|=v?u<a_W!Ny_Mf|Mot&)=tpNY2?ABXF_uDY_kM&*)
zWB9ot?y-@X63j);k0z$Ii(GuM+G;$-Rpz*k2CKKLd2wPMo04pmlMT-)z9aUEi=YKU
zrf>@?tr4l&raaKYD}pwyjcyA`X}W#jSok!1XORIUKnlaJZ`&1fWE^F<lw6|!aPFqV
ziyc9BgH%#g#j}08dkalgm+dd`xbud~<UgWbcNZSxqRLJUgU&AQ@%yPR)($kbNW+~9
z+%Gs^CTQWL_TUv-M~j7A+Pq;pZ!ngWq)K`@6j5lm+(Cl30YGn!K`-mKMoUV%m$;NT
zh3;Oy0G>I2)uKLClSouy0u;4aB;$i=U<1+sSOIzOjCUbC2xrGNs>wzF9`J!CZuotY
zdHiNR>lxOYI-1(6mK;@kw@~>026|K}+Koy_y-|n6qiS{aB`HL9=QF}HRv^R@EODA?
zuB;Y|8C$+rF@(K1rmeRh7Y`xzV0<Z51#=?y0Mu0cBi!DKpMO*R8S?s>6nJod!<cGg
zA?}yvYe(gP<D&O96W%#_Q(t<?F!0Ls6piAVR2AL4Qg^Z(Iw#!3#C-=cngmqWlCqm8
zjB%y%hY;#)4zxRQ@6ICCXOaGhLA5BO()TXXF#x%S_94En@B;1>E=g6v=3VC@5l<aw
z0koNNqz+`wBOApX1t-Xae1b&>OFO;8<3c?0BtY33`o_7;<UqirncFn5$f7z41+125
z<qtW{nedoh%hgr^FZnSFOKhtMk|{Wg^y`galmvl|wI-Jyeri|!{g7++!tZGesK`9I
zG?wkwd%lD0T6aC;Dvs1jZR~t|b!%VE0K6xk2)bOq4}boo?VrkaaU!T~Z^Xs8#vQn$
zD>lju3UYQ!-sfuBTrJc+e6s!W5&c)8bEuBd^V#=S#_{_T@z2!Ce<Pv414kYH-!K~h
z1pI$j*+Ku8TInG!PH)_n&G;b!6MQdpa;e;7M#qJ|I5^@^S9rh86UShm=2T8$dN{#l
zO21RLG(_C*pTnw^S2Cky5#)w3ty1c0yA;!BfdVE-Br%_JG=wc?+yq;Fdq*m8eVaV_
zbNz67`;k%<LLIQQG|3&dclY8Ix>j}nV^!HoBNzTRE5fpVGKE~~R8u*qcZSh){Kf5b
zx;zh|xxIccXm)Oe2-iD0*I7e<aF~Oi7KsPqr6k2Pv80uvyMUAZl7V;$#xz$D3>IFb
z&DoEq;A`GzSct@}cuTmo?>9(TyNU)eoB%rlUu-LOX9A#S>0qp|jg|LW5lGH@g+dg=
zFjN=T5Q1}<#LPO`2y;c)9&@0kV@ypWy2WT14s+;YY@6GgpmDoza56#H1E|Uyet}~U
zs|z#5#Cl9Q5m{SnH9h>23Q|j=dNT$r5o)N2P?4L=hCGagA@b5dO(ZjEeysNc4Z^B`
z)JpIp&jJLBQ1@2~YOXJmuhKt!4CmA;y;KALxa!h_tMmW;CBnUj+;BUD%A$g$_IPJc
zyS42Jid|5(DLY~JJnS2-uMT%<AZO29<zt^7GLH#-DPx0Vhw0l>$hpI_YU4h7FK}53
zw<kMvu(gN%CPTm1+53SH8oathx$SlRU#WEfAfW%<0K4j26gmC<W)A+l*k%_)TQ`pX
zK!_x&$l9(m!gao>LjiM-mK#;C82Bz%VJ_i=!1173GRT@xCLvqKkjX3k4A1)lcqj1h
z72q-I(*B9wCwZV$R9oXTcKcBgL9du`R<{5XcIZ{4_Udv4H<`&!N4&*JWF0~X><_Mo
z=Da#2L6xK_8hJ2F1JVCw)#9wEu>RW9JY<FfZtw%FwO}bPq;Lz6wi&1R%2?S+9v{Zf
zPUZ{}`e*`X8c9NEVf=Xg@ODlPXzzZDs*35|=cdajpCg5?Ht1fzIJ9@`e86|qcTtl3
z?mHdUAKm30fEOK7htg0^y&*@e)u|$;EdITxUYgwP4S^3@+)+Yw6T>Tnzm-2&X8!~I
zV<sHY;GJ1T!)F1ru9u~;P>B(2R>{ePi5q8l17^q>!nd5;rGwo=Ira}D;20^G#lY?I
zugy}2i3(7Q!Pvc=q^xGsp0w>0Oj*i4n1CXIheXjceLT1A+V)^gMp}ojTIEX?W(NvP
z-qarG25$VbH|KD+y;C2AD`bgd&kmLIUr>AA2e(N)FUim;aVL-LrZ^61(h$KAV!p(5
zbn1mR`N$5$pJv4<atO1q0QIpw1f8WAhOWc$g4I1Ur)4^{?E0oX#->EnsJ~z#(FEwV
z&SEP2gfwP99OTp-c(C@t>StU`=EmBSoT`rt3M^sPj6LiO7ZmN>ep?tl?@%vC#GA@o
zh-d7=;u@I!kZY&!4E#>oSIGvmn%-+al;$o|%=~&FWcp1@!})vT2jE?nW;ikZ)2ocL
z1Y#cMb$-LCF1rs%{)30{f{Mipc|lEU6>$8Q@oAm=y5zF!+ALw%{L{4P3QgBPI794W
z-e0Fz5js3yZx7OZM2gbQ?{uowP?;VL?ZNi=C{s;j&S8{tt=Mc^@)(BHsWx=zoqLZ5
zH!2Gvg)9g^TfCyS8*sDt^qcnAxw%V<-ySCDw-kDXKoe(&<Iw|Pbu|V)QF0IbR0s91
zGOtEam#dd=3n?Myzgca%m|Oox|7ayZ!`gP86Z!K}U$A6EsclW_yWqoBEb~|GBna1e
z;UB%Gr`{Xr29fm*iM*tOU2OBOx5*ff=8QvM7UU6PQ~aqnIJ!C^YT?fr=4_}Z-J4P4
zd}Fab4U^Of$wr@e+x-WOd=0B_8EoHNsLC?xXJ8Lx3yevgQemlvIJk0}yf`}m^{ak=
zO@_jT;ea;Ug<Kl%A}Kwzn0K!=Y$+rZD}SJk6-|j`s8LC&mCZWNY*@1|F-y*WlvFNt
zX}^A?rK9!Hvk%g7e8(N1zB~d8*y7eBy$9P}M65vo%2;klo%Hp6zM6v|Z=3%^%9m>E
z<`f8oW^K-i5?w^`>$LePC{7HZcNsx-zxe*n+j3idc-_2x)s0TYV~4g8(woRvP23&$
z_4gECp5Jxgx>>c3_Fg@yYMho+|MSS}f%_TeY)8Ke1on0PO@tCZof7S(pG+tqvb<2Y
z?5cWK1vORzUCm!S)~>XgS}s<HetY5*A!eb>-Vtx}^M2ZLCbvM1`BVlFa}6Jc--?_r
zY>d>?t#wr><uy4!nc(YKJa7?eEcNK6TSHe37nPlcpA$<Gf!sQ+9OeT{q^40MeBCK;
z$<0mH%zPvWO2|$ZJmXR?Ivi~%bSD`4OxC#$-1wcsHbeCbGb`r7uLc4M6-cjs&zRj(
zE*5+U(pr@9*6-+8AKwBnrP6VS_K~+ZG%?EeFiZ55&@>jZ<oqcL8^hS|3m{b%a@%pI
zMY-f}WKU@Zc~trM4AIgA?)ll>NZdipL5d_P2OsEwU<e|b@0I_mmx5QDyWYq@f@#s8
zFxzsaEG-bwh4TYcwI372V~~j0NDoE+qTWb3%i<HaM-gZde18NGwdnCY%J+eN#Kw08
zhSo;N35{J>P(}XpL-P>gbmsh0!R~KC1*H<wdY=8~ZwPnZeqv@z-sBk!&~ZouBI|qF
zDONJmDP=k#e(5~wl6N9IinA^ZVzLh_7LEP&tmYF~&aX;RJ2qUwVp__nxf1HUO^cYj
z&T_OrJgP3zz2)2h$<awj!#C)JNfVCvGk%?@Ri!f?9@c|J|EDtl-_QEgeX^e;O0j=S
z9e1s4nQ*q&@(3aP)!k)8mwWB(=w)rU(ccZYTLfHY5p{jzmOD$T_N$4w6Eh&d)@Fge
zc#dW7>W&sHWVn;Q26A(6#r0GNsLN^~RPagDl?3f5iPz8ptJmGE4;5dt1oJkYHj(NC
zJm=uD2?XG-Gpf64m%#oNFKVn8I00$qafvroG~89d<`WJW=5Kf`H8a4<iK1~a@5Bj1
zn?vX+WZ%VOj3Ab^tk#f_d>4Ofd5Ned=zjI#$2G8_%bA9&&j--E(XtaGyh$RGp%j&I
z7yi`&EWj)S@Q933Z3mLx_w)&D*KG<BK7N&kqBp(>!#}`j(O2-cc5=ZdVU&A#!>mq4
z4h?dY+mmJx2;XNsZfX_R0K(i2UXcYFnmbXrS)dRWLB$zbf%($JIzg)qT&xF4ST_P_
zC3{uL8_=g-d9{ThF@pcn)R8e85hI?g?9YN-mA?1_P}KYZ34CgTD^vy)V63!%j1cxn
zGgiYnMCaSk!$`NRik^dMsybYZ-NY`^;b-Ge8K3<*W9CCOxfBf^IaD9roplZ)rj03z
zYK`LUl*>B&PZw|WVjC}cPv@ss?w>L$=k-*3Y5g5{bKSWOXl2GA$yTaXEdpvA@6XGc
zwLDw^{o0W`2|?-9*2GH;8sKj;@T*YVDkOH~UZa}9#S!1hi}q<)GRJJKP6(2$?HUG@
zULpNjYv|KV7dk#Ex~Y|p<a4>5rw@whi|KnVjw8^^waB8?r#$CV2sznp#V=Wmya6Tj
z9aJ~myjv2O{!CFdNBP5z%OYQ9o{Z8_x5){BtSGb$=#bbW7vc?Im$S&U96W_I>7KWU
z{$Ki*UDq)erWIDaH#10_V)VRUKixf^=u&p>tlv8F_lio7uxSH_>nBrEG7WZ!eSvfC
zK3%Rqm-P@H*JnFM*V?u%YbUW1U~J;YfY@|E>+-C%o3K=n#A)c;wB~|in=}_{qU%`!
z>rM~INV+S#&v@E9pR@)|EFrTu2daxZJ1TUHPc*w!N!uHK$ESlekjGmJdWZ_och-=5
zmwO@6#&k13;#!15JA2t#5ug^d`x=ifqOa`cro&xXQ;@TNqJO)uWnVSAzg9Y$#nwU-
zNcvFVT-^|Ls(i&bwn?MR9uKs~LGUI3S-1wPX=>ZOa=gvn$_v=|5Bo{uNboQriG$jB
z>VRLOCs{^XO)Dfokwwx2nn8B`yY9Ph(<Mxe;t<1~5yg~IpZsC@`t(shiKaBV&oln&
zJogFf<srhE3sRIb;jS6`3asxuU=&*KQgDWYwT!;Xb%i^l#Yo>CY9OimSiFM+PFVaX
z$D7MhWWIf_8e18L<^gUI7rQBwa;#!vGoYL$DswD+6a;135PE1+4dBEv?bnS${pmT|
zQL=dU{m2~!@4wd6y#}r<Dc=yM-`ngB4cij;i6CFfq?%%XXU*i(l(t{GyKM%nXT6~@
zSuuEbZhxj_e@xvYDg@v76{2MW67QcERl(ZbDW8es9mbWY{!k_|NdsyEnm(d}oOWX0
zW@CPyX~0jVHL>~ps!_~$S*U=RJ5avD>~Np<{I0fM6YOPXlQb$Ll4NV1efbr`BZ#r8
z9BtC2XtD06=$qffOV}Jx9^b&Six5h<@ix|w_{&!<+KF$#!(Lz`HB?Iy!1IUhrFn7x
zU!=w0O~M$b->7cz8`b@f8PYE1Hvh@JlNhg9p3aCmy#0aNn>^4+8Nnx6sng=BKxq|m
z(N!U=#0RhtGbA1IU@+SD7Pqc+UJp7Hjennh`Eqa|R;m1!%i&URXd&@0N}Ys<+Iy=X
zyq*kVXK7h~J9Efvv2SUqSNLkvWf`2HqV``5x;<sOyjSBxF~@dm8k%f&8KF8bO?8*?
zycm8O1cQ2j7yAxoe@j{M+Y-eyiNqneB;o8*djKh!Y=n1g=8?SZ_rfQ8(_5fkgVtII
z*k2a>W0{gZ$XMU8&6soIuR*iclO-c19Om4dOC7#oB#L5?4q#UX;%ZWA%6Y$0#Uh|m
zFR4r2lQK9~XfHfqvUA}LAYT3T<!GG-KXY-D#X4py8l=eK?c$R(>Ok~Ta&f?u;@6I`
zBLLo!I_uDuy@8GU)z9X%OKPp8i-|#FV)S?FwMTOg#w<K&NwEl3V3tx}LbZKTEwEx3
zJ3pL4zH+iX*XvY=!~eXE`A!KLj1{u*41Bh){MjaM!JK0oZ9QOq9r+E)QqXi+jXSX}
zlFAvo11)N^?rYy{)<Lyx3mGNr`&~WR!U@<Q{ws2u7j})s>+||rvpt;xx3Z+N|5lxz
z;5{BPu5N!ORohQKx!?#3?{Zp@TlQH)$iJ=CVwF^AQD)`VxkNU3u)otdr9OH^Fh%gs
z+(Lbtik9>@p!4|_3sL+}GB<#$t%H^Ew`#+|%>3IcYWE+oo%gpJZ0*|(c0^Ng{})VD
zFwsK@tU<|KaILDWs?C#9)fhRXrEYJ8y+*b9Yb#7vwhyF>6RX?Z$JS&*geFli973?L
zK){?RC<fk;zRQBT)eXQDjw)702n8e8AU3UZb36N^(fC5Csa%w`XD}Y9cP#tzQk~%L
zYWq?#7_}<W{K1PEU;J)#$IcDyhMUhx1Ur;aEvD%GL#w_ii4NB<>Lx2u#iF@#zR70!
z&KrHQTTYWpTAZnAxdQ#Ae=`|;qqg6)&+ZozS*1c1<qnrKG6_J|JuxS9SG;r+Ia4YP
z#xQ&hWG)^!<UxUk(v-|N)Tw;GDw2`L9@lNa&qk}RuvHD*7gw7K5k9$KB02=gjy$V8
z`CQ2ERGE#etfaR;sY?XLvIK%^oFWf%)vhh7FWerR+#n(9(7q+ges^?g!578q1!?7!
z#ssY|@52|#QVsw@4vJlZPgzGvP^>fmrzHI9rj@K8u6Rqu8jRl&U5?He;^*$kigyS{
zr-sb-FwcD|c7GZj^Mjlfb|0tz=;=G#+`WGqb|g$GDjlI!qN-Bv(eMHCs-{z(^0b#!
z=ME6fSba=b{=kpLa5c^46i$)7)r+BL96Nt%L#XMWNF{))8fj3B{<$}haz?mHNSv9I
zV-3a4I1>od09qoB?u~OEn%I2F*VA1=h>{vf8ErPiI<9oTX1c5E;o{q_BXPyfy_pcW
zkLSDKT<y*Fv}f@+ouQrrLsqWH3$&DUG(;!*Y5{Kwpi4lNufZ-4rd2X90Py&pmTB?1
zj_~=-(E>ohQ2);gLZI9MF67%e)BNpd{_iC1KXJ5fW(L;ZqLKf&w^jWHR_s4MzcnMI
zl$3*Y3)UY>y$qB~aPTqkqO+)*wCXvPjuWzgJe9a`6tI^op75rvbB%1$El1bsn=GE2
z4%+YBX<Sp1KN(<W#M7{XEqvWb>ASCr(oIW28zlZxcQSZCs)WCL0pm^z;xvcUB}WTD
zwoB<EpBfW_?(wGWGn|JKh`0G`b<ATpmTed!F&3jRFfa}^4rLrk7gC_JVuw2!{4MMN
zw-@~vlf(O$<8-OWlAG=gi)|5oGwFM8HqEddFMCU`14ToO4EUTSGns&_Yo{hR9-nSp
zcLO^#NbOA1g*bTwVPt4p>7F82G-Dis@r`&^dA*8HHccXdvNLOVXf2_iOm@Y7{MV@A
zfyKv$yaAq?DuvSSQl0DtDq}k2hR0oi7|=KR7Y^zMtI>3FD-<Kh^0-aV;F`cWLy9{8
z!qKh2_;JsXwjsZ@6F|soslNx^zOW1lva+Kk%s%MI>aMs<vbvtkhlcK;Q$LFt)8RO=
z7=+*lXnB)LS8ZzxF#ru@&5|wJSf^J&$VT)EO#8Qhli;H@5m!M5Qit{0pA0{MU3;<>
zm+~~pJeB+U^lye<y;o>1$i!z&G;EI>8E?$WFwj*0<@_u|=DZ!iHEUu1UJdBruTsJU
ztRQn7Hih3qjnbf}bVJ>=A=L9arj;NOD#+_{s1JO+hRPId#wKaMxBX`~NcK#6P7P86
zm&88wtl#wVdd#hh@ji6owo`Ke*epibO}_C3OL55$lq@$5>;7PUqSo#0CoP)0y3d-g
zWyXe&wW8w2$V;Y0d2*;KJ7A(KrGFhXgdrn^HW?@DP6*(5PMy6JRK(%(;|pl~vMU1R
z^}jltqk)dZt;d71){x4^Jj|4fh+ft&OOIR`w%p9dpPKaLSLT+)5Nu}wv#^?8m|yi?
zpOb(m<aw5S52e4>O@1?;?}}H|tG6somlj^m10HY_)F*zgJU^C8!0z6pTx4MAFXidc
zaN<&8zI~M1BoghT)07FZm4WasEERUg{e#h~4y=|uD&m}KM*``Pw7uJbwm#AoWA_bt
zGN`*$4Ajmt3by55WSG+cY!6TqrUS0vVQcHeCC&z%w}~Eto_<AhT+(AHm^!!1AjYrO
zlHvH`4^q_SHQ>4PR$wrmg%H+pd!PnSs?&{duHyiDN!xpQnY)-@G>s6OTs&b_>%0QX
zrow%bonrkz*?W=;@A&_DGTn}VcK@T^;JYjNANp%tAa~pUhcwL1#?9?NjH4iB-S37B
zRPQ5ojWq6Gg~pz^+UV9a>oId`Woy(*^F)yJVH8Fz>jkkzA5W{~3WQ-MVacA;Tu&Q_
z5}Nk8F+jsq05Ar{l$*MJxLES68vkJ)GP|Od6_P$JBx>!zBf<~X@;>TH@IvE6H4O<U
z-1L)`Z;=>S)D@?kSZ#uteWwO#EeGX}cv8YouPln>laC8r*;qS(N4;6T3YqDw@$PgJ
zDP$L%t5vf$b~g&nHU73<2Y~yDLhpIVwQ@q(0c#uc`~S(_Bm};R9jy$X`ciGL>3s}(
z71!>*A;eFeN9k2JQpnpqlg)(d)7gq`=d+>jZO)~|tsC38ws2sf(2wjusXz@hkDBJf
z2i*V_V{)<1UcFzgF9z3#TyAB=v@_0&?G{2VDolv1>j=TXd<ZQ=8iD?I(Wb8{B@WlT
z5h_y>fRPE$cEPF)pQ3ePN9U&Ne&tmx+#$3rm}{z`>IU!eBm=Ch+Y~9UxW{)i9pR8s
z{1#YT4z8P+WrZ@g8buBK$0|RhDOi^!>{BX0u-Vg@k8l|#^>f2sJ@Pr$65jH${Sc-D
z!S&Y!zKE%HzLdwDzNaJwo)+CxO<*f-@aPu>U}kIc1g0_pCkKQ^rg)n%afXIUV9`H!
zm>(26dEaKi+LY23p+h>2E~+k3lLZQ%Y1oZ5{ZOOVUVtUOg#Q9b=!=8f_S4XH;-W2b
zC?A)|F(#ETkvIcy<@s&@{a;Fr(T|a-!M~5)(SM^#0N+N;f2IZ9{=eL}|CFN`sci6%
z9L0}IeIiU1rFhEq;nc#S8FY}}1Q4)6q%7fd6x4MGk`5@C-*K#?QSok?slC5WwmLms
zpJ>Oma6%$N^ZJgFYh{uFg;g<DS4XP31c+1D6Xx?o8hZtG;$UGvcw<x@li9_nS|u%Q
zDlo-a!}Db9EoaAt4u#fVNLmRC>?Pzdo~G|Y?%$eCS%o3K2qilltC}QvGv~nMzEv25
zW)5z!v^3TB*j=Nyd;51DBEbDNR8b(h^e_q7y{gn}LIW_W5lEqcGnm)ng>-d&xth3k
zD-6c-f`Xu@V+FcIF5_pBCbqWHQ3~vb(9V?wFB+g3<^vCXs>!<s*dKp!o_I~efNcT^
z4i#$_&|3Z9ecMJGS)q&(z=Ys`*!{+;DD`1q3#*c)X&H54EU4uC5diyeILp!}2Hn}a
zm-SG=gb8&yZs?&3NQ##W^DD4hmj8Vu#a8i2CU6PWE_*x{MSk5ydx-dzgmnF!+1sM4
zY(q`Is4#xhwagN;YYkZh$}{Q!>(S$!c_NopKQCZZEd7(}l0v@M@5__07)3i;?~Km7
zcrss$R}9=$XELj$Znwe~njfX%_RJr69Cx^LmVTAVsuj@#AdHaL=vu)D^ioD+&z5$$
z5q9S5!f^Al3kh}2G0?ev&Dd<qj!$ptI}tP)QSl*vn;fxSyo$Mlt(;q!J<I+2FGtL3
zBES0UAB*AdY{CCg^XhJ3X8Zq3%mDvW{rjMXyW@Hb+LwMdNI70=LZyPbAx|Avw2>o<
zv$qu;kGA~&jsU3{(avw6ICZ`*PXeGgVzM%G(={sO=#dZteEcoh8A{m{fuHCiGoh>{
zKE?iwv7|)P(A#dv-}jgm8(5my*j*Bp48SyYyvHe#!4KA-C7}T+3uUs^AvBSs2`ea^
z#lV6C1yLhaYllG#cQPT%v{x{%z%pnN!t{6Dj00OQluJC?EsNA>xWZ}{zf^S6L^)0~
z5F8v@@@J%PaLX`MSY@IP!}Kp;txAXP4E;!k&5(8)V-k~zAPMo!s$h9GA8HJzO29`w
z$Kk4N@xmA5!_)MdvL$`q%nZmIsQ^6*r&GPPtXyACz+bCWCkK(y^Yo1%4Z?g5bxcb-
z-(U<g>|^kR{B%-sOLhlF8Lrit_*TZD8|07$@gD0ytqp-n>S?kE$z6~hwT?>DBwD5U
zoUYoTjyTQmY-oW7FP~fN5a`EED?lGuRHpyDhi<`3-${!DXgpRl;$A7*Ug$Ndr?CZl
zb(p-pBg|NPOJ}RA&E@6|S%taO;LtuAr&kTa)Y>FUe*E~XjS4F<$0(aWHT4VD=C<Be
z)XZRp+xF~PhKHlO8(0)LvahRSAQte89(n>&cU_YIo`g7<rUS+`GCtWx8o*trVzX!*
z3S;viC!L&%ADSeK7TPEWrX3JRE7JC34frC|-e-66{OP-t;BlJ(|52{Qw)q+Q&YF`#
z-;vjkb3hLn)()5F)|6tW{^JZBKjP#)TD~9SsHXY-Zq~HcLqpQ&P^%j1OE}=5SX&!j
zXR>AD7psrj(ez7#PObtx34od#Zcjx4DGsri|C^%L$=c4A@0OZ3&m+s<HGjR|X-G$*
z&KTQUp!i|LnwZ-*TFI-rt4Vfsh$~5eQ}}09<QI8UityUJPHbbg{g%d!(cze0Z3^*A
zTj)7?q=5Zo={<)wSA7aYUg{cT2{Xv~;o{3C#nI<o|HJU1+-J_t7$Ae0l7!j(Fcmz?
z)&?>g_Xo~g#KDh=2z4-J@hNZn%b#0v8QuMR2NxeNi;Ka%0U65Hhp|TDkM5y2w4@TG
z?zfAdMLK5v<@eAD=_5J?kK^^Rc}u&}^firTtm@s#4I__waa!FNoMVsV0`Ka_9VJ?W
zpcu4?h=<tAf?_nMO@Mcsvsp7~fgB*Qvs$#qTN$17HJ{wtXhx4gn$mq!VuggRMdWJ>
zcVE_rZ)Z4i9Mr}dL@ZR41#;Sy!D0i4XJd%?mE1d;8`s0V5~1)V4}KTU{adFig}<94
z4^%Q<%;Q%)u%q|+l<%5y;n?Z?uYD<eoQKLZgr!+TMDEAjXFzCc(I|y1?Q)Z~{G8hS
z`hg8(gI0AsZ*xyvOKN|B&gYk!gk=~EJ?Xwe6hN#Sg>_guNFRtN!V4QLQ1U<kI*XzU
z6?`}>1tr*tEMzHc2<L!PX7{K<+AsV4SDRo7jl8P7I<gi6Y@ZsLy#8Z*%#QG>vpU1h
z>K{J)s*FExOMskaL_qh(re1%v7)`!b+Mh>L1jlZG%O&n}p^v58DmEOPlfsBQ5E36|
z&;AR%%!AX28D@)@r5*5;K12a6Qdn{zcArQBh?tZ<a8QJ0&&^J;ZRY`a*89UB9zC#-
zyL44dxG{}$L4y%69Q^L;x?G<o#h{6Kuin|WUPVN)ZUApKn4rvnWnY)2f^YA@Z=euF
z2tLu|@>`M6JySlxT@J+fTU3(in5+k5XL5N;4|)iU#QOedPg1?L9C`r5xbSVc@pQid
zXaANf4z|pASn^Ufc{4}1W;wWX$Yt$%@>g)k@a-4U`@_c0b><EKNooNn4nCD6{qv*?
zgAN*+1E6<-Rh4Fr{;55+{i*o_ms9F6DZw}W<>$rZ1R`VDXB(pm9H#s=*uE%bi-+2$
zH_sacLuau?dv1p=^b~Lk*B5(LoHr}XU+S9$U}rb<7WckE2!uFL`ldvNMz3%eU?AUl
zZy`LpeEFGchX93fKHDG6m$z&~tQFgW%buyyM1YIP*bpP(9DyZMYAJo%xj$Z~qkT8c
zuT}CsUXe?3O`xQA7iN`Hx;3l=G{2KUX!Tuk8$ra$4lad~oH^FiZl~a3@S?_~w%kWl
zf^sbC#K=Ui`h+DxGJr=HSAbu{1x-X~H9FWO4#bGso9!mjU?X1F!HXT*b5h;|yGrIt
zNC5bwEBS%te;~Z)?YuuRo~BZHbvEUD@Lpr=(Y()FIF6PPVhDGomQhJ2C64G^cQdwi
zy<7m!_`eIa>F!1!9zYucY|c&mvNrikz-9#PPmn?OU%(<s#CXZ@;VEwv;n@8=GQcRc
zD-nVoPKA6bMY|40gK5-CSyOJiVub9}D*?HDI)9WheY*rsp-j4~0<5fAsDfm|2njH4
zyoJFZypUk)m$zDgXXZ>#Ocbpl8@i-Ee94`rmCVeaKujzbugDY(9<y3E*V9*LB7}#{
zmo}6IU1Onki+23ePk&v=5uN<Rek|j9**2GF>E&_uc-`?04qr*!O~6sQu*4O(WCO6g
zAiQ1>s3xslvT7wKnvswlrla}ht+7&bn#;%cahPeRDZ>b32D-X<ZEamj<!ZCic!)xx
zde<m+z$Y3(pdG#~O}wsEQVzMtckO*!<F8NN_!cW|6EwJBj6jlU4<|Nuj7>GVbd@`q
zz3xkC4Xp0sRI8zaC}4}J!o1_H8Ufd6visp?B&8~vYDAfWnu#J0)X09hF1Z}u3stif
zYKtZwV2apZVE+uDY=-1}1;2l?|L_6+kD{M%P-S86;qmVw6aXkv<^N!)Rnq>kjIOA{
z$RfHlX4Q4n(!`=U;%^K^6A){i)-?ZJGAN@XQIE)vwY}##)%o;GjZMgcln6?Sidw`$
zQ8iC&B9q}vi59=1pcTDNJVuSB>M@j2z{Gt*NQ!8W5XPVINQLTNxIuqqe%=8o%0X1p
zm7yK*YFGQ7pj$;hgBAD8o<X5qiNhveG;e0pHMc1#mG_cwb|sUvFqXbm4-%Lbe0NoE
zGai0-rtRQ}99voSY>Des3QMU-?sp4^YhL1;%w$1!HF{At@H(XgT3RyIj2FyX?r`1H
z&(jrwb{O6%$*$BO8|sPW=Ia&oGeC?-gTn~`N{P`Jto3&M`3}d`Cv<K>Zx;LDH#0Mo
zbs?(N36Alt?nQvSZDYw^dn8pB_}#^JvC<<<c~cRPv5wG&=Z-R2Wb?-;+a_m$6~xsZ
z7paiUvM)a`M@!i69GoB_N2^LVIunkh2hqKGJ=UkCf7Qi(2<4~u+f)SRu%~FO>8}Go
zRv^{A>OxEebxZ;kr=8i#?x4kkbz)9ak>xxcoq}z=Abh{OOK~dS<b+k51MTlC-}$2|
z+}bfHj|tkZhW?a~?hJzs&z!*%7dXgZ1^=16mb|!Cn}VGJV_b5kg;vVH4vp`W6We+?
zkm)g*;mDN}z3(n4Eb)@qg|lz`Z$oDQtfYwv%U=uc14Srd)utK!^j0dKzLg(jeJ{*d
zZC_Ah5zD{qMz3yS&8Q08fj&XZIK9DAx!CsX-(et3mMbe}8!Io#pW*-ccoI22QiFUS
zO;GrM_Y1alwsfWk{CjhCqMGGD%+=p8OR#{EvN6h-LKYim+`meaqgpe$QFdvG32Zgb
ziaf3)Ho?es<vSE5!C2DgoCuViF6*u9DTY@rXklM4tYGlY`TWeiHOU7#)a@fWk!I3p
z$s$N;U?4Rdz=;k`<q|AOS(T*qs!V6>XU9BAqDowW?q?d=8mBZOH*V9YH9w*r?uE2!
z#NJVdop$aQdwZ#;kI2xuj<~#^@k6rmPp{6I)8Dqo!QCy*++#kEeR^It40(D)-m~4q
z_ML;L{N)h+kAB<D{?F$i@2d%C+}R{W?3x%imL#`u0A!M(q6TI^1(JL=p<2xlinYNC
zf{!M<yPU_S&9WZJoyr4HYBlb=AW@*R$H(U?`J)+P6JqpcS&xh<GZEwk5I<yrHJY3=
zO#I+g>?YKA7;$t8<ZAfKoU+!fu}R+9k3kKEKS+<7KwS|-XX;YM4&jR8I_76R*)oD>
zR#OD{fP7A*69T0ZtD6QLZ_khZ^zrg${!iBTm(2%;D?sPVMgOpTvuiit^LDY5fOE^>
z>iGF!^rXDr=P3t<;3D9X5xJ`=g5|7IL86anIVA<Fo;XRPmBD3lrxC#lBp;~Ee&H+@
zW4~pc?-D7!Vf18l^u}B`kOEj68_UWbasoCLAl=S|-LL?M+)iX&xBqND7MJ52QDZDt
z&S&F)=Ph2RLA%1AmyLE%xZwVSw>ARHv;O(;qUshMZ=LAb7W2H4&OhTrSCL~BG-#0N
z6*o~#NBf?uvVC8?KhmeSPWDKHBgO_jJfvpcOVym}&U}LvI&JD~byY%+UggTsYp*r~
zKo%}z%v?qxAcJo=l>#c7JypqzWQjexqk3*Y|J?$|fV||S{i;(7zgx%5Eraee01C1O
zQBX;FuP1!u4=E#%+XK6BF;Ot0^ap{#+<Js7CKJ*SC=WcJ&Vif7DoN}!ToU8d6|~$E
zQ|bv}5rKif4Rx)nL6J&C4AJYN+L9m?Pz(k^f1R~L`e4u`6`$GYG)X{0f`#mosz;Ap
zE|erdr#?w5D~f!Jl_H9W+fY(5mzw5B&60KoRaz8-t0*#eQ^JF<ST8=j9uMe(jpPBc
z$;oBOz1G(JH7DtPGCby&p~Wgb8&&!`L}^N8;s?H^l}I$G`^eT{6cG=qEHFF;Akn58
z--5!{s!&j2#qwLFQWbunkTGxLV;~ee>O~sFT6Z$X*$mwnu@=-QSm}lio;3{li~Lk~
zA&ahBYd|V5bO~2DM^Mc0bhf>=adGo?<C4!Nll$19RG*N)Ayto5+aVEPAwqDnVwcl*
zdwxElDJxwVOjk@BL^EsMS4{j1kPO#-hlA95mtUdNZA4RF_k}{TN&X2YCIzb$QI(64
z0>m=lyw(ep7}9?9FrU2~9r3Pb%whZ-#R4wyqGk`Js+J`ynlK2>b#@;HB@Ny~A4iqc
zW9={{V2r-UJ-Q1WpJ;}F--&%c3})k`i5dePfC63(DR{T$D{cu{?l1ZakP|AY&jLFR
z=59$USI_yd<Tuk~;Trw(XhRo_=@emVC{xM&rfM0_3isYp&Dz1WCBA)I><><hdrORb
zb0u%X2}r!KWm_w-o9B(2-g7Qve9%W)H+i3|T<>jGJb!ar_N^}h<4?pDxq#lD5_<EF
z8C%yTa3cJeijZd5ghL$|fP`^fClsQX-v#6WEq_TiB#*+(xUb*U;g-HOOD2btdt=YT
zqjg(f<0r&Fzp*4s>Ow!hrJBv(4RQbO3-FJi$A9w$0Q@8VwNd@QxRUQzwnaj+F^cx!
zw{#=7SWS_(8VB@TRsUZ%f2&z098D!jY6hIgZPx<I)VRbZ{UUZoXRph|;dT`5Bdp}(
z`jI(&n&s3IwG1_y8%mE(vgxG}UE?$7B;tDeB>MV!$kF_ui59_@Amgx-9|a+Qfvb4q
z3;@0Hfs_5byZ&VuotRF9oF7UGM38))>Xw`bynAF_5~Ug<Tptg6Z<3-0(qelG68PfC
z1Kgc@wy`2YIsEmz%zG!RROXoHU};S6PRv}MD4#xTf+Pbs`o_2w1rvjum<X9n9lG1y
z9WI$SMryv&Cyn8~*FWR_EXG7I=x^7im;h*hW_h$%n=s(X#><f3yS*;|fNF6fE|Nw)
z%qV2mG4aBa6a<q^0yW->U^+c{dP{~0%_9|@)g6d4%bJ0KYG+d#CjtLy9sqJ~<#!y=
z5ey*9GXgwmM-PGuMNkjSX_<Fg|Jg*6I*aOox*${%Djvh+Ezig{G+LOjuP_VCpa=Yw
z!5~+NK_z$66;wv&<28rV&I>W`-Z-6}F*BC5Bwp+<rF9F(_n_-@Pf{BHDVI-L@S7bA
zhX_EIzO?#YKSp(0rP^i|(JtAHte5Xh97GF&FnGLW_Z?d!>tleDI+wu!blSmusrn*~
zoo6r~xyz|DOIeihH@p5kNs(88vkj2DgLm+H8sT*ocy+=XB0Hn3g=hL?9w%yV#tt=t
zyTGnJ#3$UX5a#n{XCL*~N8FN{3ge-Qnk~2Jk5)qtl72-`0M4rbXV)K2E!2#86(6Nt
z;N&^ag5~AF7KRMVBZ`_$Oc2EZI_o5yIR2)&y17bP`$Tbn4KZuZlYalh%6-6JB$Ni#
zXu`qixpqpw0A-Vvqk_#n6|_ETd5xqv*_o_{y1-**kfNj_*;=3BdWgD_htiTb$PqOp
zQZ$NU=*XO7YQvU5&!t?t;P5fcDwzf+t%an+xbg-0)<NR%mo!OelL-QCUI*2RBPvvy
z_407?Ypc`AqLj)V4Z5LQ(is5HYYFHlp1A)u3z3G$yi`As!M&0*3x-HAoCmOd(We!S
zyXxj-(l0Ard5+&L!CkG-dtMIqx8xZa-p`nNnWu+>Lw)YLq!Le=irsCjDCEXp8qUG5
z*kl6SF8UFf%l*V$LyapCggT?nbKdf6r;w2)8m{@sp$P)#Eiu@%6#9TWE`yUwfwuH=
zb+Qr&`KhUGwv5P`@cDz1>WqLiP}yok*=k#NmGJrBcB6Bcs7NHgXLc^gSytmnS;^8i
z70K2eyUy6N^mKJ~z$qtZtYLPWqZOWDGCQ@CRZWNrc)%OMvl0wO!dVe!<RWstt<P&Q
zX<kTJGgn0}T23X>NR9z)R)Ao+u@5k*THgb@pkMWB>mI=iq#%wX#B$!vFiDo=NF4fw
zHMvKzeAC-^ojYCpytf?z!e4f7cKZiApV3>^8j2H`4fRJCecSF#rqDry+DR4Q?>;)E
z&C*GvtDWTA6$R~6h(jlvUIhnlpIxrUS9l#B2;kMkc$mn0BmltlT^8(Jd-#CG{tM(?
zem>vT$odoC>!}^r7tw7K2(Y!)L#4obHoi+DTh_5dTPm}pyPjNmy6?e-+D3+L7}=~n
zny@g)J$PDp-JyxlRXeeKN+7(_m^z!qDdeP-MNu`<n%xiHfvNjnYMnvX=;xDKeyHcR
zi&PW)2aU}~a8-c#h993O9&hd5PmV)MO0gM91!bP4NOtO9>gu5ymDV|tSSs9^dLiUH
zV(xPDDev`cMFuJ1{vKdhR#(C!mOvHlKZ-o9e+6HYsx>S4YTM$i)G0%{5L^kA5^=5=
z3qbZw4EjNfB^Q->f|TgF{yqPRZ{_+9(cir!*|Wca3q%EQ9iO_`QjlO)ZJCtpl_?_M
zS4*UCP1paq`21b4;W1(((`t}ut>4%yGFk#?%j>2)@>Vg~74w<bm1aQEBK0@4(Pj~H
zLttS?;64{u-ow?Qvka6>^G^(YivTDORehlm&1gYwJv?A$leTQ)Nh^xQk%`RYyose#
z<-eYuzWxPR=sMICW0gkPmR^!u|5P^9ftFxtJ(+2tVJRz_@c37^Nt<;4OD7=^(0SOu
zpF001-|~Nsx7^@8@z`Jo#C<)WhBlx}wu@#;TFG+r;jc^HM^;ZOWvp78Ki5h&q#>mn
zyB(*j*w!)BMmO5ls)AnwLDy-!n8am%zK?Da`f@8^z<}E@F+2tw-=?6wcpcpgiPgyL
zvYPUn+N=0{Z>T*Eq7#z0>sH!#UxtQwBa<f{cMq7OQoLM{C-*P_&F-fj?io=lRIbXl
zxvXObF0qgXY6vVGm|nli1|~B_vnZl4{#RdT0aaDEz5PRXcZYz0bV^Gj2uO-ZcX!vJ
zq)R}$K~fq)8U#V<5-I5h>5}}my#IUOgI+!uXAH)G^V`o_bI!FkXYBP%c)=d&{*z95
z&YjCx2H<#NP<!8kIr1JChFQ#xXb+gdEnn=5Ly|aG;7lYa&X}HG>tJSqN*7vTV0!?`
zYgkL^I44L~Lhx`V@W$?B(|8iQ4`yfSs3jGKXS@sC6-3bRvg?|Y<-njzQ6~vG?!6)L
z$NKuv{*94-n0a<q3QjXaR|9$$Uo<kf*@24!R>cefkrvOdT}-c6TU8v_3^!9ZhL;In
zP^OaZ?r6r%+H9JJFA#E8>tS*(!R|YYTVf((+1U)9?I4^v`y8&%)Y8Q=noWcnutt?b
z+sF@q-+cLjaaG-pAe}xfcWSCJCa^mLzwqOO`g|Je<3^WJN*0dVkZM7PL_w0uR7%~p
z%%RhnG1HgqhfD8BBqk}jPddK-8b3#UaE8KrAU-hj<yD)13LEKIABOVI3Tmel;rzL;
zg|EE-AzY!z_1bEm<AVf{#*X8r<H4m{%p@YkHCP)w-%E5VVrL0gf^~At0z+-?dsZue
zy4c}uKJ)2i=qGlV53Y$!AxR`bOkX(1l2$+>y%=px_KvntHv&)7a6`F8Cu`#1yewIF
z5|)Yf-h?U{^dW3UO-QiYCnxLMeV*J|_V7Z<g!*7MZ!i4IuI2c%5uV}8`Z5WJE-!NU
zUP|y9Ln<YFXM8E^xIZFwQ*#E$)~|1tof5rmOc3)Kt9gHyJA0;Wx%|Pg+sDB++|%KP
zO_;PdCSgClrfc#WL)cI_PFBl3VlrVBd?oCesO`KdiFpxoUk40H9HD;9d*k`p`*{~R
ziSUnU_XDT3)|qthV*!Hbam;vRG!5RUp@2&;3N)<EI%=WmcfY|UA732DxhjeunU~D?
zeUoSiv`o~x#7S67-vyg-&izyem)Ig~k790M$Vh$cb%edIyf<c14^-yuZES3|SD_W~
z7m}o>krb;b1AM7TcWrfpmM9cU^5v|cX9d`L?3*8fn`~u3wk;y=%v7o%twETv!@d@9
z#8U5b5Z}s%^Zb6|zA@(4O2t;d6T7G#RR(?4HOlkrL#&UEs8U^x*t}wz14rkd=LYN;
z*M6YnIC%(9wiHSwjkel$_`mQToE37^5%LDo!E)PL#&>Za-xCQofY6UULWKgCC!<;y
znHTjyfPJ>vC15E1^$b?F6ujfJ$c}DsR|rwC?$TUt`QC0jl=n^S)W+^GmUltx#n{a{
z=8dxJW~JiNDMn;CxdwVtG@oL+dyhLznJ;}dc+%Hr%-3fkXci+8b{gt>pB~0r92;CJ
zfp$UG@hmZv-Ga*}GO~AB#@y?@IqX<NWN^bmFjra()Mf4ST|x<`3gQK_!=*3aFOzsg
zL3|Bsr~xrCemX54hd!MdEh3_ZJ}~GcY6Tg`Aw#VT)y6pZ6|&#Y-_;(MsBd-CO<(F2
z8I#C6+ts*w*3^AZi=<0^Mu)tF?i|^fdzG6zeO2DNS@mnLq<OkL>ZIZQ^|{`z%gub{
z!SY_A=@+VO_anF6ll{aLys5bW@WTs5^pl1yr;n${P(vpYqVjC7V5IkeSA|w^wVo#l
zO#4L~!N68Q`SZ+h&Fqb@ndVy+%fF~kirUv4LzDwZk9&^9LiE~YG@M}(x(><C*5^}9
z`v9LV%AG(NPF7`&k?NXxdb<_WntI9&0eAgG7XpzXX`2_bK|9)r>TvvERXe1-w0Q5e
zAfj^kp00Rtw<sm-EVJ(<c%NJX73W&rP7`?xSecHVVe(aex9{X-L{^D<CHc@PSB`UO
zp-G<7n&yY#S}%>Mdml#z=eCw_rP_L1y9>EMG6K&-4wR+%H9n8*sycn=?3}l{C`?DN
zDgvmOUR|`%u-(IOv`*+wV0I`~QjwO>PZP&TCo+%CCVa|1<&e?|HKbV+mLN>Bd$M#>
zx%4~_5GwRLohNP6Ip&ojEx07zPF?y=1;yj(9uq8hp}hyLou9>{)~$c4PUR%BZS0c!
z`Vz6=0U9y;0{G^0uM(Av)T*e72xUVb6zq#pP4<#RO@zp|8DlKDYT$-tRJ|Zx>C}@!
zb`7YsKGpo%@-uA{%>?eg=mKrMP6V;Dq~imzj({u$6h01SSlwNQ5(hm)>y`MDM0Jy5
zRmr&AVA>BMQ|x-ClkgHMIel_|us4;&le=cZ^Q`V~k-fsFn-3CJdnLFL7%d*Yp>V^D
z8}7<iCPe-b_N=Bwz!Q9o&(mX1?rHxxg3Y<qWZ!+@&CH16C10;!w@&Xb0pZgTfr);j
zcH?FL>XoEW%f&M-CrYCy8XGs4LAcn)iOk_lrYNT=;=Y&%>m0iCJPYd;p~V|fCiwJX
zCZ<T+<j*>IEc}A!L0!pZpIZn!9;2%6pFX=*AAMRx0x}$1W{3e_8%p%}nMmin8s;o!
z!<z_w*A%23G55qEsF6b4Y{Q91$j6J_p}><Mck<}#{<YyHcn$GpM}$w<%Yofs@8|yK
z*W5n@gr^z{P3-mY2|RYlzFQMj5t`Pm9Ce~vdU!Qe>z>B?p*xp-{?fi|&wTWD=hX5M
z(r3n2)e;S~j0oW@aFM+8^lqLCJ#PV*xT9REc-HP(mZ1Yo`W&rS1ia0K$7R-vpKO-9
zM*$1V%ThfM+8NP9N|J**xOiy31hi-2-ZsQel*1ZoK3n!Na4K>8b)q+to7#?UdN+11
zCns=d@FiTaYxq29HA$)LXXCMQl|GbVlFVVn2=DQ8YQ7&2gZ+>WUp@%aERBL;cnPh)
zBN*Z}v6o!r_iJ^ZZhMuNm0UA*v=!{RA97AjQE_#-N{S<p+Eut{anq+4=rD>}a6vgi
zCDSa~Lfh%xx`~j=AFi1v_|q^XYc|>)Kb>Smb*%Df>ZUSQ3MVfvG<a6!fo5lH&=I5h
z3)x_<;wXx9YH%?8rm|Vd0`>~ORlDuSvJz6w<E!*}H60qNa?d21{DD|gqE1a_P=ZSi
z6>+bd9pzB*>YLEz+Vhd8#Eh)bGKyd|n$K<Zdc3dXON1q(z7%wl9&d097PF~P^e6HO
z;?j>t3y)7gi^2Tj;YA`=@7If_aSm9ue7Ewcd8ak|n+4d?-kRbZB#hy;$+1>ZZrj0e
zRGpCfxjI416|bN9fN)86aDPSuXF00pDx_FP#2(+CCAPl`X0k)B{(H9zpFSNnX1p_o
zgHa_3_lEOmFEoY^Ls86po+Pz`*2o0R)%G+SB8gfVtfDoQ2TztBF<zpK?Rv!*`s&sd
zL9xy68Bf7Zyy{kBI*ZAS^TB<EZm%lEc9#<Ow@nusIrxVVz8@Df3L^awX!{(V>+XA$
z(mI9lkfKzC=s0$4D#o9=mxUqqn2AL!vkB2d3=KPQdNsfbr;>qaDqi-}!!I%wH936T
zv`-{qx1Pvn?Wh(SehY|f>VK9t-STXSn*FrYj}0LY3~imqF<Qe`G5PVihinz8eo+O?
zQSTwDPn}23JAV*3nd|(md}&#xQQp1^u|<5X(y6(yS$9!5+t&}in<ihgq+d7W!-Ql-
z%Y1w}^=UEp8&CJhYortI<X<5~)5LKs!Olnd&*YPP(&Xi!;Y;6C4VeYMBj8DAXf*Tm
zZ#@v;I0qj3o0uZ+Nw*c)8`?ddZilnlWV6Zn1YbvH@hsRd66I%SCRxlC{0SNpl!hNA
zG*!4@8Q^yyk>}i3JFs4H2;b*{&&r4MR?KYus;wJ_03<f96^A@h9uJLBm*_zfz;Jvl
z)F!js$K0>8c|Dtl-;7!DM1FT0+GBxUCj>^eSqOY_s5dI~8m(Wx3t57Zjz9{RlQn|V
zt`4o$cdeTZri|dDLG)&Eag%qq>3aNAY|-uFiK)oVw{9&uKM%gYW=69jI{h#j^)su1
z=6zt6iuDBUwcLr=%}Mc#f*0LZn!JV@mHDLC=-4@wHjPMDT$ep+8@1!HN*_pR%`sP2
z+8g4*6u(NSgvWZhHjQew9cDCimdBW@3cd2X$PI!JCf$9!GM2!j2$OL18SVnH+ZLI1
zaf0nChU7XA9;GyIF0B+3WZ=9?Xdtr_e;DE~XxA}M(Uczajc_^}RLKJ;*`+e`=9Ac{
zy`kiTZFrl-G26QDhXQJd<GJG8G0Z150}X}>U_uvknMU1!Zk$PEI=i|2#aFA1_0u#}
zFgo-WeDcX5<{4SVN4)Dl`?yQj%n?%YIJp9P3L-_9UC-@lr#!TD9uCp<?9Z4pA;-d*
z>aC&ePeRukCwWGzQ0>-gqK+~X!PJ<Q>0Fzg1;V6|mj-i={mhy!-4gGOmI*+~(OH_S
z0~0ofTYcQ;H(~TU_!4nB&8XbF-0_tp*>nhcxBzEw^o?1vdV~EaqL|^{z{lY>%9rn@
z6g9JZZ)(r2D@VK7ubMe0p$Dtztmqkje2G+o^9!45#{SkS9%g#%F;<jm@}om@YeXAI
z?WE#}bT!>?a*=@_c86D9`5lAh<g{m*)k|;$Z$*u2%EEL1wVS!n4sU#Qn#mBY1h36S
zorVSL?gHa*7aY9JLIw)iLOQ5G;i7$cOj=1Bk(3aeh~3PV%W1mH2A2+K43_yvWSVVB
zYqkR!EnZ0EtvYnHVKh(VGh|?kntM8|yr>PHOxnIq)KjL=e6>yc?MfBRKywUVi|_;3
zuIv&wh#(+L5-z_heAYr;5Ed;b^zou=XF8NLSJYw6{IsKjX`Z1~0O8kUJa=;GoQ$zw
z`%VI3iTkK$0aRbybmNX0A_(L{;IUAO`Wl2k_6)%h$ef-rH;6skj2Bj#emXYiUnZE_
zNG5Scwi?ErPBzrrJ+=hCxd7J)nItlR2eCs)KTC+1Vm>n|WNoQ!3_j)_T%=#@lp`LU
zdvh`Pxf#PR?|u8I^XY@M8ZdthnHwV586%Df{8oEpUg+n#lY0XQMpWpiB+-7Csbw=5
zaEYg;d;Gdw!!>^^2&r+@D;f0}jYY-_jy#b01A7ki4}%1YRH3H19o<2~P}1e@U|Fog
zXw$i$f+iTIq+_8XMrtlNTCr;ekFQQxbbaIaD{Ld*`^+Tuz3O~XK@un|iAz6dAygXm
zyqrs*&L!@#Rx`2o5?#@|Ef-K&ka-L~X2i-1I;*Yv_sIFF?xK2Jb-tO);?ih><7jUS
zx0R0R4X}qAsrwp{))k%W&yZGou)qO)NbhZ9aNO9gW8kjc4oA?m{BUb?8D3NnO`NL6
zmu^@|60NbP)<?<{Gr515+=#U9%>?&R=HzX~Eq*d}>w!ucLH$-au2LMmT9MsUr5947
z)m66BC+fkY*r%Y4h9o<Bwe)b)uPKdPGuxL|NZeFJ9<Tix+Y$M#h^gs50wP%EMS_X@
z2h<~3CEgE2@y<0C#va}k^o$*!Wzph;9#LuDFl1$f#DyStRhU&skPR0FCH&%xINVfe
z3sBf8-Ttni_|)J-yv^HnUo50Hqk4Vq_Al$QTt3+_DF?{(rG(n~9nEMzNMonjlhSzR
z-noS0u!Za@qnW-FVg_1gHs`zn%QhLAJ$j`|@M4`>MbK;1Lgh#fYpuyS!6KNFO4Ps~
zYv@2rZz6{9Q+a#)uKUXpM#Y28K-+?&cPc@%{Q>2aQ8rEr6?>)bB9@*DCQ%wFQHfo|
zYb&R<a>zFVQc~{>T@nvHJ*P!-u7>8G5%SQIxqu#UZJQ?#VsTPVyp9>J0DCA`8bcjz
zD9(Ki@EZH%Lna%!ZK9B|G2-)hlB;Na{nCrzy4@C=@C=Ml?ms;gcanmfp?UERL&Hel
z4^vqpI|=IY&~dcZ4#6_5L*4fn_mPuIRFBnhb4sm_z7}_u>NyIzcK@e|q+s-fXag@n
z?pnTe^x4Z3d(*D<x<SVYNASpJ)Jcc_@_o%TeXVsaKVKB+KAm@;RVo@Tj2b)9Y6y=W
ze+6yMynb$<x+K)I^SSp#x(G9o=EJ8v7R&QT7^<>TLXx)L6qvZe`DwpAu0#XOWWd@<
z4nq0S@2587ZRgn<xm>B;9|SWeK6WefbTvF6(kL!L>wi2J=2tm^6J!G3x4;ylD{4AS
ztM}7Q$@EAu=evMQ7%BehY%SJ`&_nm8`lYAYYqm;IA&X=nfyjO=si+zgzBUTBq!I(6
zG4Bj+F6V1o*U*QOx=s!zHk*2rhT)=4V@7&Dyb3R>>Z2L)Fx)?7W)&6gG9oT3I@Yz+
zQL!|u@k-X@=dW4FZApcKlbU2Yqonma*WRdm7LN$TKOszy_#mFRX+Aih6DX#DjW7Qp
zIY9$?e<WrMm0nebT2p!SgSO(aJfXx3R28hJ#1EI`+brUkm4zv6PsVyu;tXdteD?5d
zaQBC%-hDcf9$&4?T<CHte57O9X8yxecUt>>TVpKB_&iH`a>*+EE3l7%K9&VDopreE
zOC>v2J}edIP8wa@-j}fG@o>E+G|y<>4w|hsht()9ROM9#<~oyc2e~mwYnxDu&bTUS
zX)yfqK^%<iDITKKWnZ~UDzwX!UfZi^Hpw&VFnU?Q^CZ_gDQ$7TAKfK*Z}%0>wZ`1G
z$rEj9&y+Qctw*t8(e2=6p*%*8w4Epy7J3e&^tR_Z6BckRn-Qgns>zWB$mL!c?iJxG
zeRc1b3Nzch(s@mi{Y9M%`l(7rl9f%>RYQ&i8v6#8s5m)K$<nW2ZGUpOM~poo#-Vic
zq1yc^NQ`_y%Z&Ym*JMa(*vmVKLE1vhONgO4j#$bOtx-+A^nx0kF8`_6peq$uP`>&=
z_r=sOT#n7JlBwdID6uzT=RD%UZj8OL&L$Q_mLH7YOKn;3uL~hMz<UpO;a`L<nP@Lt
z6nd1ZJbX_!A?<x58SJsfPN}QiW1g@0pw*;(6Pf5zk4NjGqILb<VzwP;XFDUU)Y&Qe
ztCb=~bd4>PGLoQ7@S|Fb$$TA`63bb&!717sp2bU~oXf%ahvQGm3<{I7PO-Bx0)CDn
zqB?bI>pfi3i@Vy+B-}WYWGt)Qf=)lkrW+vI@h->Am$ZBs<USy@-fx5NS4ZE=lPYj+
z;^NSMp!@RpmCpR8qT1|*&rnP&E>Z9|UWdI0tP5Yr)J`|i5Wz^b$PTfWJ%mK?h3HJU
zD=F*aZ+A_?jTcF@$tDeUvtQJA(|Qg*Xh<5kI=Arn4zk$Q{-&aqcy4`V{YXs*)|&D`
z)Z?s!^k1>qc{AGla=h#;6<?BVP|!{`XQbVuCD~I*7>q{oxDP8nipEKpQOkHgr~iq{
zdr9Qb%eD6nDM$f)@u6IOh=CxMW^1O`lSi{cz2wX(gn(FiPaAIQVA^0VYSL#d3zlM$
zYP~yZp*z@ZJ*v8}A{Yvb_j3uplr>AjlW#fxjH2IRi%R{u{b!nK6jZC9>x8&OM2=@{
zYRGmZc!wa1)v;!^A;)v)GZ*;4A~$F=Ob|vVTWCC1V`ZuYpG>AyFk*h7-ENY}h0=&a
z(SkYLe8KA2MzuFdwyC}HNw17sLluM65KV1PIh#w~lHY9QO*~3v5Q()PT<-3WoWyrW
z%pBV_GSJy@v*z=@$CuP?YD!YMwtOvN++Pk)L^rJZQ^t72bd=$b^~c`jH%ffNtkOt_
z8dV4<lMrs@2B&7dH&m-Vt~^4P*+i`yLZMDuMIpDHP894^ZHt>$c7I~Uzk`cr?8$#n
z=xJHdnVS@<<r*3}-4sUs-ms3t%y{f&FVt6k-CxljPxHlP6A?cYOB;+L<`}_2cVYJr
zAs;!yPxH%bWZPR=bFEvb6xF8$&+kz3|3V?>;MHAuDF==Sqo#JmDaee7bQyL%3AWoN
zp=Nt%G%iLaG0f$%_a3Ltf}gdJN>AqjPvWj<yC(&i&=b{+Aga;L2=%SniK34#>u&?B
zW_s!TTqOl2-F*YIOZ8qN`mLZhM<uJYg<M>IS*kS|+1+b98rP<y>f~L=K5@lx!ElX6
zjb}-w^ko2l5vL$r1iP*0M>aaTZRt=#Yd1Wx$S}bzPHgQ#G<=~lJg#3F3(qX_Wj6k7
z+Bf@D=VR?*ko(4|eTteqNpH$PDwgg=E;%EQZ=K0Ed<-?z(O2aj+<Ae;ySOB>Tqo1w
zsD{ZAxW~hKhZFBx5MEJG^F;G)PW6qiNSm*Jbx5!WbE&cDODMtR(|W2d7x_M&866GW
z_$J@l({5=CDn<<H^(9H!D84G%-<p-K?eO_=V>WSMZQsvF%%XKkH#Y~}Jge`1Mf5Rs
z;*+kHiYih}A1kY!g$4CZd3}ks#S;9dxr;|Ou}RhK@T19I`imZd^H0rj^xQUToM%$y
ztk#_`!D8M#fn!&@-*zmHT}JHbtck(xX^JDVnm>g#OI@m!`uu)&tld<)q&U&_oR$e5
zDeF6(iGFQFidA}?fs)V`2g>?#_MPAtRCEbC*W_8-AvNi<S67dcS!*RoC3phPre2sT
z)1h4sskE`hi@)(do>7qy7}a}lt~=N`g>N#HOAbC?*H;<!W9PXX^7s)FpH00@z03Di
z(y3;a@u}3<AU*rU(?#|1H;H+~MXnu-GYX8#Cn<d}*j|zu8Y;3eRbf5q&1b(lyl78)
zwi>OxGN0@|UoI`c2;JP|*JfHE64A&FcwtT#;Hv#rTA!qKwrrF;2ovQYuLcS_vMbGu
zmbNNz8q?&5h?cH4cLP7clSf3?ALeYX$yn+ef8H>p>3y~+aZo|S`*2frbFwYJTeaKu
zt<{gLXeH22h&<W>b^6_^etCjx!A5t&@{R)Yv>1sBN}a;quJ+n=sZ-Uq@37q?#q#cW
z*FsL59&Pxq;OPrKzFl3d=K3Z~*e0tb6~&03U>KVdFnaCPZfD<YCu_0*IZ@O@-C%I7
z`HIO6w`6I}Kn{xY?zTpAFRPoD0?7OqS<`T(eXV~l>cwWZ71VINhue)Ooe4(%=o-8H
zl99bPb1?~xJuGYhvru%7wNl7a3rYWjbh9}ftCi~@+=)J`adi8@cbTKl9mUZz_Gi1-
zz^`0uvs=viSd&r$cn2u-Ln&Ak6hYZxMCy?g5<xfh-G&-f`l!^BMB}Op#kfuIZWQvP
zNI$&BwWXsBujtnuP82Q1Cbxo7&_lAaU4Dk@GKH_<@4L|jwLIZXJh7Hngzd?`B#$-b
z#1vJb;^M5V3|xale>5ics=yzKBy@k69USB2fhe}m#2>|qx){6?KJA`CRl9$je8CLQ
zC#O3<BEiYR9r;S<JajMN+FB299}}8_7)xr!Ij?ksS)?MBNOr3>xSDR=o`Nu1>Zthf
zO!Ti->ZLvZIVh^-H)5^<S5z;1*h0|GwMG^sZQ%L*t<x%TBs7mWvC%36L+k@E7{Oya
zY^;IY`HZ-_TOJ>Yq{k*6fn=)fH>iFQ`JgksEeq3EJx@gw?0@h0Hg<sf#0baJruH4S
z(b9hDvZe)}r1n4~n{B#MgR@ZbTrXiSbePN3E4G_~cQ{>a1#4H!?@t@RZ%8vq$tX@(
z6bG~WgN-fUdRzHn+Q=`oh%X$=cG7}3C^n>9Ork7c8FM4;W+!>+@o12LLP^M`)N^X#
z@XvIMM$qbpb3fPDMYdpqnoN2AUbvG<!E4Ye{vrQrfZ;Ob1pAUqZwzv2OUT#5w=Bm^
zTeHqH8#9|-uT7RY>M8@`yWx?@zZQUvLUA5<Cdl>D?{V$XNjs@*pRDW=<CtlKr$@H2
zkC07JLJj-<EeH#eu~|c!omAvmtU6kxpTsM*<nR@dnomBFQ_QPbkEA^5apuukhRT=L
z3@GW6GO$2(9q!^NCQ<ukic)p4QgUikrK1^_*xMMa_Y;2myGN!$gs^y!M9$b^yAg8{
zqW3{~K(a<IvDo*MvBNsDI3%w=@Vv|?;%p10v9#}tbSEPE;RO}J;?JQR5|OfGoBX42
z=f8#RR9CLWG*V+QK+CSBy{6|bRqOZWfv(ffX^&na_0oKd$MwSoUU?4n0TMTL4?J|&
zp(dTa!nQ0z2cA(p@3V=ACTR{!a7TqL?8t9T+{8fOn%?d&Pt=UIbXltQ^TB7R>l2{#
zuTo4ptiF{cUQK7DH1NFA{%lP7PL*&>CLOFC8={h)k;fg_Eu>tVFv=rhuW~b@<n&)8
z3n4Mg7;3~e32$nj#Bj!iFX>)P(bIq@n-+ztA_TgM-&nsV{<;e07v9X&WV68{S%;;@
zLz``$A`TM8<suP5W43z|UJgcjL<n_iMKeFD_@4G95jz37A4S|m_^#C2<7=f1?@txR
zQnZC>zAim+0@x+s555;xaR_^fUXuy_*;KXac6qY;%WF}}o(iANu3)a-rIpD=xFQ!+
zRo9CeqKIWEbZ;;PpWCd;<>JdA3Itn4FKv{0PS(R7*HF|+P)*Y|A+3RPrj?BvwsnWH
zzwCz=Ngg*~VynP~9en@Jl{(jH@GiwVp)&^=Y*kuZ0E*=1PLr;L&4z*Z#;=^OT-kcl
zcvB8NIj=HNf@}4t1ub05w>D<a=6OYAft51pA~XqJ3Gn2{y!#jqiWO2mtBk#1zwnzA
zGpI_3cC1G+3sVf2+aBEDWI;=*NjHMH0ljEYZeGr-6w6~+@fxuL^oHm7NVIh+Q4E88
znp5>^Pb$RVSRU-nnh!5J7stpEwIS6)o9>pty=#_Yn(@!P!WJ91dq6*XoD$X7uF^#v
zyd?&o`zef(ID$v~k>H0e`Pa(J2VA`)L0{gPy)ZCj?1cw$;74W0fhqiqmJ<sWi5!f&
z8TupAd_$kJq>>suH5|;;dt{GZ`Ye90$~s-&dyic6XWjII;iH@v361Ng0~oD}KHE29
zt5nxYesCK;rFrH07NT_Ho<?hWkCG^qqX^2FkK$Xu?sQd~1<`F7bTxKU2^5)!tlK<{
z?Zh`KmIzko+{4^{pbFMvYqaESRKdKW=Fa&#7>}3kPO9?mxjCHOf{LizA*V}0dc8*@
z%U)OQrXMEEnaPCVYzBuX$R(|sDratFp|6FT7a#S9F#Ar1-nU3P%#Z!ccqE1R>ryxd
ziKiLN?j6hC3c&NI1efsLc@7}b$Gyd*-D-$&o#4%&g-dw$%TRDIoFt3~XP0lFo1THX
zO0r8cy&~aw&$|T=+y1Uo2O&KNL@!qmx}tdL3oQ=#Si$b~EQ0R}!CN0lmbkh5O`O?<
zT+(}gaio)8FrHY-6~!HQiJY?>%<|>$6zv8COb+2A>ZEIYGkX3!O_a9J9pu#zw;lss
zzTf|!6Vc%Rb6+fd9^an|Jc=g(G|M2l??kkRk;(t(QNG_#@|FJEDZ`DPp0;e+>4K({
z$N%#yv`)zcw@1%=v5dm29?Rv(Dl1j%-*`)hf<5dPS$255pUTV%ozDCa&oEEnWWjnE
zgk_&V?b$mLZyYjyamZv)%^HjF>g&ha3Y4cf!x~BO{^cK%?FY^(rXF!n*NQ64vJ&u$
zcW^{lV<Y+rYu|8XCL^>Zyy81+)&9{}ex<u>Gel^Ai7A)d7^RZ%&h!RLDuWDr5b88O
zkP95cKIs`{o@tE;mz2IHI1<A)SlPGn)A+bmvE%Dz`+okG<AhUlk++GwJ(&fc?rdK%
zTjD+(X-?RTTh{Pw9OW}JDfn@xoIRSiNXfo8?{IXVadjEE2(=#UCc0LaNTT6oCB>Nu
zuG8ZRCQqzcXjh)BsGvU<bHL8yntpjw4^sr@>zQ}NqEQuwG9HBL8ramY5Uz$lsn1V%
z*pBmlYCZhIXWP(kRQ1UfSCmWRtJ%4eD|Dfcxb8~^1-~%xc^K<rB)%XFgeq?4C;QV3
zWBNtgWSlG4{?hit7C>9(DYM!xEBWAVDW5y#hkWE`oEM-(fl){~Xk?7<mLVkeMhw;r
z%;NEY&BHNd@`y}>-paH4<(t%xlU)hJDP0`$ZSPkJt2pE|Fwbcxm)oO~eEF@0`2mk3
z=u35TG;(G7px4x0N?~UCvC(Gu%1=KIw6ezO<Be;ch7ala$dL)Nsct+q#;6Qus=285
z4(>#D_3yCcB7dM&Th+1t6BZ8?Ap-qE9gJL4IAkMg5NL>NL<PN1fAoTF=|NH}u2W+o
z7cF<p$QWmybswRIR6NRtZ>>M|8-A+S+V!8E2ym$0wBW0~=2Wkxw_sNN?0hwYuk($<
zTZA~fjvn{xXEQEYURCmVwYT1{xoZ^Q1T8+5kL^^5rZZkvy4hFx1zZ)yQy_trUw#KS
z&87D=+6zPqs@_zXJA<Ei5ycf6d0|Z7@OP*WT-Wu~y9ng0@RPVjoa(lg@~<M-u@dRH
zes>K4(^VmXSfweNp_e5ht5(f(nK*T4FL$hQXy$(|srO@fm)wl@o*2dy!JDBM5<mVJ
z@O~V#RYbM_8mEBl{I$Tq$5fII9cx7}%?)1P>~lW!bH=@mw?)Ck{v~<AZ*9!w*zsgR
z(hq&HyF}CwJ9d#Qjmy714|HP-vrg)CoAA9ZXAWwmQL)&16Wnatsb8R&jPr62we-4^
zt$#*gku@x|LU!CbiGs6`c4>Ty-z3YRF>JKAx^=v3?(S2(5{cMUP!tFncHK(AtL`xs
z6fzXr6VQYhGRI*zm>C4ZNK~YT;`x0@I1!5$ij?a2@5I$}*nL1C&{yC^3IZ%BfiJ)-
z3iQ+O8#5?Qh=l?k4s`b&fvtME4^coM9{S&_%fIi)^DANj#f1!#L%xIZ_cc)A@;mg&
zL7-tL5D5RyJ81b6gC9VV{r3V-e*d|INnz^_AfXV@0r}@Y{l+ywkoFF`y4L#UHoCgE
z>)4+%-E&tikATzD?L;6D?wyO`YJ%cKF9;xI!T{Gz^rC?xp-?xta|vfBbE|)j@Bhok
zmju>T;7b5;LX63cXpI#3`S%O&&Q(GHH(UrS3KwGsM|0bMo}LC!`ab`}K=${536OK)
zv;ZFgL!fU1<sHc3U5K%plY_pYldS`*uCBR_xzpc6m;B8;(tN<D04(6F!0`WwP~aU=
z|MK)5{^<(w?<h&7expuwK#GdM>6=BMzaeCL<oP`dxfkX1kCPY#>votwAH1BO17wxo
zI`Tj!&VJ-QECYR@-?6^+KTc8*Jel9x$~SR>?GORWVdCG$?2tUh*!y@^=5|glf9A`r
zx~8HvKMx?|ya0uFgK&^?9}V!qwluW;r#%^jI)&frzHYTVD+E|mK(f%>QP-7r56{rn
z$_i*&YUXTh>+q*W#otUme^hE$VD@H#bfdk)?8~~BX>DxeZfI-+WX?a`P$7igPUCzm
z9fm6^2-HdU=QQRN-w(C^H<$mjp2+`}h>wdtiwESgFt9>AxFgZN?jD+vzPXjVwXG9G
zdHe@?JCm?+j@G`wOilx_zB`jkjrSpq?STHFPA2A7#(!2X>8(ui*Mq^R0Mj2Rk-IY6
zTkl~So12<B{jNXDzmT^H<himGzycsY12XR>5M#%ENJm3`J99^;KZAYCs|<S+lLwR@
z9B?S(F0ZQV9-fK5p|OFjt>vG*+c;w)A{<))`fPv%x?3(4efROK%ndF7O2uct<NW@M
zF!B*lE?hvYsqe(uVdy@ZvyF{A&;`}}ujS!3%-0Jd47Py2dO+XZFn=1khiIyAZ45*N
zh_b_9F?sr1<=MgGAW9$}uYgT};*QGb@%xy@HpUL-|1@faD3#kmvqfK2djkX30h0Uf
zpb_Wp!`a%JS{d6}IsYZ_cF_Hcx?UE*;akAH=#N1!Ed76pj>h^9hGu^T`gYL7kM=vx
zfzY4;%)5gwTD^y9X6tIG|92ir|DMB6r4KS@KwPqbc_h0thnMU3;oR+v|5_Dqx2Dxd
z>59*Qwgv!rcWY|gz6WM*qyOhHZ*^rWptfcLQ-=&3T)M048*ujkvGxAD%b5dRc<uj$
z-4<K^rb%}SuojdO{8?<h`}e>sfJO-BHh=FXxBDg@FMIVmFir{}Tkh_gt-tQ!SsEMv
zRk*j>>W*PKemh*z0P(!L5L{i{!?H4WF}4CWr~f%t6J1zDDuDG6*wgRw9Ix-;0cECd
zsBiLD;oj;?@w_521bDT7EmS)&Q2#p+*5+=8R<_Q6-E-XL$YFy1(C`00ae$rkZjO{7
z-N*Yo47a6_hgjCM3rt-s;-96kj&>i)-O*0p@vmyV#kGk9`gs9aA^<q@+^sA&tb1{{
z7Epf%;&!0SU0d}bfI3rP^6m~4j_4kojlo~VaJ!#rB#*OK19KJ&ES`7wvqh47P=G_H
zldXZXp{4O(`;Xl3x`(1SZ<Ykq@dE0o?$rGO#eGQNe4y=rk841zp0~gkfW4dx0F3zy
z7)yN*(80i3-$CEV{2zxS2;glv#H}XjV}K%)00w?H9EP;_0ga9Rm%iK89cy)_s}rbK
zUz|Tz_h%39!8zJG12?GuE0b>p-fdM6d53DnbRQ9-Z#~3#Zw20IHhw1}M=bZ@?sguB
zAl`-~_)Z&MNP$#b_Y)zS@<ITme<#h|KCO^Id7gWL|LG<J0lQsT?)F)PgbnlG1H037
z5rTJHZ>x9u<3aKah3@73wMO0EOx<k?1&JybyASodMbuw6Q@43?x9<)lEne~-8e|t9
zh;iNq;coX8NZf?<J-B~+vHS_U+nxdvb}f4!?7z(_AYixK;$16hNSK1ky|Di?riNfC
z{Z6&Jmc)>(D7AZ7|I42k0(+ZucYR|avHTkMVDGrdLhx>f4B=1t$DII))_uJHm{mfs
zZu8`>=^&)GNS*sw5DtV8xZ6CrYcL0i+tRxS_pjX?1ngGZT^kuln5g-EFbGo_2;8l<
zyS5*YxM|D#aexWPZx0X%*sZp^=i4D+T=w_F{(IW}Pu$&ui;%c<*ZXn*Yu~wDmhOIB
z4~eTzx(D~45APwA!2mUp$VLlAB9xm31^OQ!jVU?1p@1M7bUl}Y1MZxG-4pmu0zN_+
KPY1SA(EkI{&%Q?h

delta 64558
zcmV(&K;gfd@dk*?1dt#N0I}0TA8%w>v@HMtYmqG|2cQqaQQY;kkz62uNy|<$nP?(s
zJ+YI~CQf{8d2;5;T3(Q7iikjfgNJ0@IoaR7_2@SmB;`EL{jfa8G6{57S65e8S5;S6
zAO7ia*VK>adHGnBH__&<T~+0S!)Wqw5-qAMFRxCbu3b+4f=?biSk~1#n$4D7+tp$=
zi}LlRs@o`?H&xNKV#dFJA3TuH*J-;_-}6d+s~YvK7V2BG>e{@}zpBNxX!WmdzNxE)
zXmq{iPJeClwFc7G=|arY#Wf(Xgl48?x)#ytX%v@jeDWZIk1V-p?>3^5{h4CZje7M^
zK0GUz)yoeEvaM?U_DZy~9Nx@?diCJJgGG@xP4rD(h!@puSyX9%ChDY`|11{mh?_k)
zIKbDDc^^@w=>vLA?=tg_>o!qZi>O_Rs47JR#A-HG+2nI5TP<~Q7$Ph~Z~&n$Euxz|
z6IHZCplO*!w{_lPsT?R&MOoflM?fBg+cnVGDn;1;>O|9ujEU5aqjX(Cm8Jp$R#3E#
z+O)ZDrqNj$t*ZuqD3~^Y6TT5JcvV+q(VP&CHlijdFlJYEx}HYQn{jlN--t3wC2H^9
zd_N`xtUhOzXv(;auFL8+TEV}zA~2HBQCbvKr4%9)S%ia71ApaJIVRlTW!gsGsAbg{
zdP8uqo6!9Y4i&-y9Pl?{ToE<aYA`D3>?#MOmjlDHN^gLFve8<=WXZT)z^=M&sJ@2+
zv;l;v#Z?aUtqF6e%k-#f06UF-TETZ1oxCCR<S3+uOg<tj7}f&F&m+nu>c;E6>i9A*
zvxc#(1u}612q8bD$VpJ2p+WI!bPk|gAd>vtH8lnn@c~NJwe*z77V6L!L*-g51rcJ5
z?8tzCm*~BJJ85Wuc&b|vouS@T4NieO0^`jAE+3v#Fqsw$v4N3_imJsS-=x5u0>*^7
zn#Lk3D<Wx=f`Cg~o>ephY3M;DmQ7oCM48*XUBL{!>eixc8{{+S=|aH3LR&D+7Km_K
z-$7dhrrO{G7&HJO(u|1R2>lr6dU})Q1rN)ruDYv#6}3%>$^mHGI-i52jh4ivBEW4h
zsd*qh+NMRrGfMFy@=qg*Wt2AlWCR{vte|g0?MS*H<0IM`&PZq)1xHiS>_KKuql0xv
z5{a9vX$%kK06o{8l_WUf=O~m=aGyl_^G-e@Y|;+!DcgL({I}_9;0Xbul!Ok<pIaD8
ztMY4q`Dk}#Xt@+^y>MHO=vk<?8cC}BuPzs@RTDlhD&nV2otLl{0#m0~f`+JRGnh17
z58y6DR~J^*Z}1UUL(YZ?3?zh^WL0SMwlEa(D$q--aBh>sFMRgG2D|Fk^Ag78Bm`Ey
z^l-^Uv#4_#!;@Ym>!no%1)3S-V1HE+uGQp!>#l$aX>~+WS8=T+FtBQ)TLa1A49h>b
z481@dN7yB2MZV5e`M>8^FhyG!+O){qJ5>ioZhgnKU>Mu=#vZ)rT)=}=PEmNj0r7|(
zkr=@;4s&?Xs{S=6swcFDL<j%#fL85h1Ii3a3zq&=lvn4gvZ{rsYnX>84k?<p+Q5Q;
zW=O(?WV?Wp-8rfz%QYKVR3myj#un#yt!N%MV!>6*s=5(1su4o9__`LbfPoy9FL>Mv
z(@E%~E=->vfO&8YU6RU1(*R2mRinS3pTA~oqt8BkAQ1CziOWX*UuMkT!Vg>Zk!3W-
z3R@sCl;oEu7JXp)R=AEVfYNsnmvSV3p9IKoK-NRII+2;tJqL6FZ}mpkYfzz*E0a)I
zkp2UcUe|WS0SR_lBATbNO<<h60wiVArnMZX*YXhvSFvW{k)q1ox-~?pCwZ?1ywl~O
z|BIC?Jv19sC6I**Xepw{6PG`y+8cUgkpRarSh=XKT2L8bU8z;&@2e|cAi`UJB~WpU
zZ4rs9ZdEuk??v8V&kYH2#o}VHFqeQ;+%OWFwlE@^sP2e>+>d}%pc~<GJ;h=+Gk2Q|
z<_>bLu8hk9j^Ouc(@xf4CoOZBKr5k?m`1c}>eK8cmT%MbMuMcd<jT#T@R23NthWd;
zji&Y!glZt6hV*koV4{5>VfiV4R`B4s;@`kAl0RR=kTDduDi~=z<|iQ(^johgOhmV$
z;l@U5o2D`=>I{=v3+IXp;Cp;Tb&WP<ASiXlfY9&EQt?(kQUd>?ncqBa>h>~9GO<j%
zqCJIQBe!~yqOD9Az_$*`B++#`ujUJDP{0(f%HT~#R_jT)(XxZ9k*lhIF+W`kv7s&*
zXy3sjZ%vHSC8&KUsM?8Cvth2XSjfv6vAmJDZ?E$WPlKpg13i(m!Bk-F4$Db-@?lHG
z0(Hn>J)BZC5XxH7pGPm?;TwD?_3g2d%tQ`lwr=pz3_+lpnV0mGph;~3Fqo;TCk81j
zCT(N`c<E6|8SD?FLW#<McXS&40G1*>nZd#azb`KN*Jfk?QebDWwxMp}R4lvo%*ado
z4NOoWcm&jQrjl4jvsqr|?QE8yu0ECsNxy<b_3K(7EmB|QmsSf|D%eSBb+WoW#Xc$^
z2z%=G%sh6wXg!v)cebjk>tKlhHIBU^K27}hE@dv7yJpHu!#I+E-(lgnykM$KuSA=~
z)_N0<qi?|O5hD*XJ*v{S(r<yo;-jRY`8sN1<SnbhMJ)Bb_|ni{17+)acAM5EOsk}`
z03n-ZVgvRej1!oCYW+A$Q~^}^@X2JN6vT<v5Tj+0UM07ye6b=c4w_qZPz<W`R1h^l
z5U4?pv{4d|xL5yw3)US_dDvN@nM!KABv}&rYjG41fkh5rjlAgg6~?lw%P9Wg@$=ZO
zf-W@)3M)b<+<FZ16gpF-tt{AY@)THLdhFH|A2xsmC`~qd2pHDEVxVCq=zN?4FK|=i
z=*jWZPp8Kt(`Ez8Uj6~OD*jtMj=#(C?<@NIE&V;Gzpv?k?@RjoAN2R1@b~gUHD_20
z6cA8{VSv3}MvvuBL?L?kP-ALvQA9nOK3yIG-yKDXhBlU%ZXHn2X&1AN07I(eW@n44
zD?!GNQPie0U?q2i)zj^RJef?UlcTtg=bstL_~?`AljYG7{V;u2jhn7(B%+5f62~+U
zySgLVG$Cz&cMNLU?HE?THZ8^x@P3WNbGk$CcJ<|CGBI5Nr47ajy^Ryrscmqw4Pr-h
zX<^|nku#5<oJ5DwkEMVmvjFu&z%XU|G>7(=<_pyYAP+z`Jl&>}c_l5WS0g~st%mgw
zp5dPas?J~$2R_GdfEKXbDFD9#?G#^*qdy-z(@j->h@M4Xe6bh7^w=Li%}($S4SJyA
zodZkurZcp@2--S0MpAs_&>THECiX@4o<D8^U6ciYzrha1Z|=%Eb>ycv*8!pJ%smGQ
zKbsfT;(CVqP$G(S(}-+_RzjZ?rMN``135s~;#Q!Mr_*B)X;W)Vsa%Tk3`!@a>_by_
z9DVwK<cm)~IsSA6gi=m}t|p)Or_rmwIs9QErRf`@CbFw+d*pMw!qx_6H$ugpDb)7|
zud05jsI$FlUiKs{zT+vhWuRcyZ-l)_wzaExy~YBdcj1bSJ1k{JzCm=ZQ{T~bRJQ0l
zg7(1jp>_qR#E$YsRx{{oK(Ja&=~q(6$yfP*=>H_|vq#BSC-0~5|LChxa&i6Y+w<2i
z|Kp#RUyc4pESUwcvMHm%ZIgyt_axR~-;9z@3c&QLuDVU~#9A}gVEp8pBB#>4qT+%U
z&#%{4?MD39T@1!$CO-Huo{nHCz7=&c3XcRV7g-|FcxbhvCK8TDnq}VJn^cK&;f~6G
z^DIMByBir&7nb`a=n|2dKiZICU3i*qHc&9hEkT=$DH!)NV}KD39+qff&7_5;vD|C|
zSVpg*=xbGS7|F#d&C39mpqeWz0lt2@+3diwt)ylxk(17tF`G`JDo$B%cDdblWE(#=
z@*GMMkWu6WkJji^wQ<EwE!>|)4N+EqAEErF2VERH>`Ow)=-jN+6JV{)yBrOA2F&mj
z2N`s^E`DD^L4bOBp*nNvl$VZ~Q{9tC76_*#S_5-30sslzdr5jm$vBF!yU-$Z1D7MT
zN^a0`S(U`C7sr<%rar-?={4=HM^Ay}aF0lDoFM@6d1>S3v9WC&gdFtw%T$zqXj&y3
z+GeXoM!(Sj0dXI*v)CfAVI8XzQ*z-X3~*Q3WzjXO#N#DG)#at#yaQD}l1&0bNaOC<
zasRDGZvRoU3=|E@8z7tX==GeD6}gT4Z$+#8qmL!C7t&G6Zc-c)S<@Mlp$TdGWC!LY
z%Xb@+7m`6nFNb1G5tpKvUW>Ya6vZb`r{9Y9Yi{Bv!ZPuIA4ZGWE$&gyHpvJyo~u<$
zh|Y1p4y7r&a-qrpl%4udSPeY@X;Fird`BV{aEQ7xr!jFSzyN-4HGbzvf}avu&S0g+
z!GIG5vgP^9H?N2@ff}N`$?K{NI1`M3j^7`D^2z1#Uyqvi$H$+eF4`A=*5`F6EVck+
z5tS{u5yi6Kbr`M0;<{1(G|8K)Jn9Vu!C?fsS|>FwfN2dXN(U-RH#&O%KAArH>izps
z1AGv6mW-4tVzVx+`E#FXPq7dfdUN{X;`~A|<B|(c`m)>fT12-i3N6tdJy>`F@H)bE
zF|AdM4Srq-Tq#jK!`<6|rN8TqAtG}iInI=2e$~}-SDR$0F-_dvHIy8{cw_c&%9xOK
zdPiIMx2T7R4;XlW7+cE=@+u)Nq2NCH61AYQ^3s}ZSD|O7=}J2vV-LuqH>S<@EXyh4
zpfGVzcId>^rlq4UN)ZY$5UIY#sQHnFyy9ycMo&C|9GzDUEIclMD5Uv3td}ZCAoGA=
zB!H6<h~N{Ps_<ee8O9nVbV_Ftg==XtM`Ty^AT!B8{K7I4d<iW8U|uO)6rNnd3ZZ_(
zN<o{mei8i(F%C8$F?d8MOn<JxM$_Y@%t_5D$;06mMx&wL0qjM!-T)vl34+j*K;^z8
z&45H7JGvx#uuy4#wpx%HIkXGELXFt;mz`(e0d2#uT07sZ8QR<70}D}zt!Nm3zY$k2
zp~u5DUGHUZ7zW|ED{Y!LCb;k=(jw7|gyx&w&iN-sJbTXUA=M(brqgS^vE5ViC_3sk
zFA7|rQRl(sh4&@mpPs+@;p~TRPYj^5rQQ(6AfgsSFK8Ek6+oNf8s4HRfCI81PWjT-
zqH-MWyM#wnDI<Tf<3rVFwr;Lwyq|m8(F%NV{MS!DS*zCh$=Wp3A3hgEzn7A<Gqv<}
z@lg{=->+47+>^K7B=O*kdSgg)!?*MLORP<{7i@qwNFR(2W6?W~=3T3XuW?42hw|l{
zH$T2Pfm!H(4+&^H6#5t**y-;zbR=)o;5}J0diNVT0wMclY9K`w!Kp-e=%RtKld6vc
z28YBNyQZjaX^P5$BPXckA=DmrSIsYQ*Vof*WOxsd9HVvQUTG^h-pAGP;4w!)p}g@W
zt&Sjq0cK^+8Y-)*T;5SJvN0ux?N$o`-Myz2D>21?KZ&h5lmF0l3HQ8-s??W(JtVIw
zCW*aGV{chVKn^QXg=bbjJtqWW-5~iB2OJ=4k>+8P_#M#Bd@f>Z$e@qUkk2IxsQo~9
z$lp-MXgcW-7FAM*JMhM@4yp4rz}`r6sEl4saU?a!Q)Kr^jL+Wpma7D2<_7oO#0Lz+
zNZ87M{YKVJbPsqM<9e<t<PE+Yt`M{>HCDcrqOnE400s2KhOVGwSF=5cn=cK;z-(wx
z#w7~ec6=^QCh)}+z8pf(wukOuCAKiCifpEejmgg)z!7TobRD}Q4wqfEuVHu-13xe(
z5)*GYY{(<fEZnyaiq_m^y*xSlkrb10^y6E9`ZB_m1!Kbc8lbaqb#Yg!Q1x1}HMA0s
zT-tRcGTgx&M@8DSGZmmUTV<ZSlmUGhu0^h3C!C9RKBWKEOdx;q^jg^FcK5Z}eYt-t
zEDkQim8yq}-JUA84|>=#b)Utt+=@;lh><@<sOYxc231JEJB-dqZ;z_^O<r{k2KSVI
zH))Y)6dBY6TiCgP^kxIlm-)hQL}INkV-35<uPz+iAq!1m>Qhf=_JaE)Q{t0l>GEIn
zM)1ic45g<v+l%o-s`6<KgxUfR_Vxgbr>KiBP|?4n@H?y%ivK%_VvvdwnK14;U`^>B
z-8>|FLik&ecE1t!cJv(M?w}LyfeMI!R2xL_b0_K7qD{%%CCs&#;XauEqepKI6u|xO
zw?F=%J0Vf(p%0Z?+=(!JGqmgz_Ez!m^4>>PsfCx(3saeq8m6j>^-_CT9B6`m1`}Vx
zzerKyuM`mAZ<#8@EpHt`-bX2sMfdYJFA9;?pj4G8#h~NwKRlUVd>7xtt{}^QAoBO8
zdWh*bI8+d3unJ<E12}FQjw1?ig}=a%@b~5UbG9k^TGsu@Z>!P1rPIUbeSOq?6i$ZR
z_hpZsAKORH+T!TqyQ*uuxtL@HsL>xqvMKJ?Z&3Q%sF|l4Me|6asveB`69gEbTca6{
z%D<c?^h|i6S7r(T<AD;|vR;FKC9xN*XdsZir|}!vw3$;W#|%r8)IuYGHmsQvU9wol
z>g+}kWj?WivG!?0x{-TuV_%HFB}ZP3)~Eq6g^mDp>`0<Wb3_~9N7-mPoywp%2giPg
zP=w9o&W_pt&Ma9v8x*Em@T;96S^$<Eo)$5noN?)C96Kg>54wSi{WJJ~_0;EgygT)h
zRK#5h^Z)|q>MZFtH2O4U{d)#LnUw+?N|J);<xBanG#`esm|HcmH($TA)P95zz-*b9
zd9xbEN6!c@=ndXStuk8|RMi~1;XykjYU+X9Eqv`B<|RDEqopQ1I?&HERf;$A>Wnsa
z`D7-Hz!f<LWLyoN1WFfwtG*Qzio|~8c!M`U@3a<L?XvW$8J=`?k@{AYf;>#t=3-De
z>CGOs1xY|DesR$zrj-Z7g)d?l@x>~>lmV2Mmhbw?@emjjgo5uvjxMi=7d-)7<HH!-
z(CRn#*$^_+8}=n0la|}oiC29@?u;znMg>!aNYQKHC^XuB|2&v~>WtZpLc0L&;8tM(
zp<ZR~;%6l9c9eC;yGcth42X(Fx}IlgbUnT~(T@#t_E75LAD)xqccYz?q}o}#WjCX7
zz%{~=_#!*)J45|roN)j^-pumsG`T)K?cy9{!Du(LC)Z=$Bm90d8j%+dx=IIK>^f6C
z;MXK0Z(bPR&%kznyifSOJZo|t`cB-@;<LB&21}O$Ong7gz?)W~xJ23JKhmNT_iOd6
zq?q|dj-nCNeLM+?aT)08!Py1H>tOKBFj86=&M3BV29UMtG$05g<(ws!%a==&&B5f%
zFiWpFbwunzVL7J;_C?ImHpZD~4s(XYn0c$0GJEN>PJ;S>^B*U-iSsZ8$C3OGUY>pX
z!;f!Xe*OII%TZqjNV++T;{np0k$n`!2l8u{vmclmi8iSCK}9NS^$si8QQG=6YH**z
zybgBUKFsR(-1OQib^mUM`$`>xVP?>7XYD?@z0Ek3TC)Ln(mDed+Ky3Y<HBM()9360
z<kKhf0IoiNkms+WstFnQsR8cC3ZMux{tlkFNk~Y^oiuzm!{`nY(D-Wv=6b~u2-1Ax
zR(mnUjug=oS4An%4mwZVt}e_WLoz=xoOpIE?qnzu)u9@v|NdoQx72LG(6MVy3}R;h
zK><#x7rV4>1tn!M+n#63C{{=S)#;3vQN{G}#Aa%LNk)YhZ>7=_g6GoHyi`F$R3^GX
z;mPN2vrOxNs8JV|;J3SD^AXd+n<mIM{X|F!mmPVr2}1$XZW%g0m<uD?`r0xV(!tqA
zP$G46OiF`Ag0c*8=<*iLi3CPGFmu(B{s#lqZ>##67Knb;@Y%m}b-q7cg9TYKE2u0I
z>MU%3Yxx=1X?+c{0e{xjH3oHs1C{(w7gLTWjD^6!H|XOX2bTfH`VgS{^b~F;?2Op~
zrHoLqJVdB|*`-xnslCx*^o_Kyv{jj4To8~0_?NY;e4b<X|4MU&=P;joT!JN!8H&t`
zxUO#KG<_|?fkbm?q95Xgs5N-{F)bG&(O8auslmXw;pZ^=TNup~+(nDdUdo9g&r1ej
z;CL#6l!iLPQDFRj%4r1>Gn2!ewMXaQgD~S6hM&eKQ7m;@>~!4MNOgJCUSPUF4MVA5
z>0f$JhXckmsC_zzv9MZr#A*fDK|lTa!m4}8jC=NS_QUg6FNanjUg-F!#Z1Go8AY#u
z1L}*5wA4oRkrJ(uW;6w!%UDhn@0d637L9Q6VIvl;$hJxiR$47%B*Wd+OsHTx9cLGc
ztm{t(ji(AbMWs0$Z21wvbbECG@o(p+yQ2~BGr@=Xj^f{TIa)jmp~S5vUK8AtLMKxH
zaO9um3Go?7ok8le5vvnJ8ief#ImbAEW(v&VsV;Y&bCF;qF^NRp)a88&C{-1hgARm#
zSIS?8$H=S!n+6ul<lUR^o3YLkLn&dJkrhXLCgbuPox@T&Xqsx>#3&$2=aEPg0@nO@
zdTAksFG*C<lvSmP_gI3#vPTWactr7x*I)LD<Fx8Fn63+Y5jQHC;FzD*YX6jf@^g^%
zX0y-L{H^v+RZBgULHi;0kxGY1!kkhUQsNjGx>VD9c&#<g4iaCE!n&of<wfaL$xUXY
z0WC!{FDk$qdeG$6Z>?z+pdd^VkA8>IPl7WfN|n=6Rpq%h27-FeBJF56BW80Ckkn^Y
z&cq;5ZQ<_d_ZIOG$;;uj(D^8T3hr54+6o>k^V5(=DEP4?L}<Yb5?*spfch$hB5C^M
zrcgOjf+W<!p*JO!4Nc}{L~6F%&#Y&A;%6ThIT|UdVV1Zca@7M*Yt_uJ7>81gI7P}C
z$ikUaYS2Ll?b^D6)OIdMLBO0+D_{#d1)#4_@GSn5APhXLxdj5ehWe9#C!?N);Q%;=
zB7rDy-LFueF5IkP?qSD)u=4u=v=Ho3-KDtv<@M2Lhl7iw=F)Ioe1ux)*HuxFri6Qb
zGVdoS<%yl*^GF?sU5^ezd%R)g(5!xAmrce7|DFpKd3~&>jwrh|?P#bTj-Lz}f_tz%
z_wG>bU4yQX<Ss6~;UY_aew+Sp3k0jbdz6<e3fKYBBX>e<kzReHK$^<7Mo)wbTAcVH
zNu>hsbS*m6#LIh$8kl`<Ko%}lX5c<V&JTQRgPZgDWCkg1)71E3)?I^(&vk$CWr$5z
zOLb!dtOeaU0W{EK)pQ~{{LAN`2kTL{`xZ)wJA+g6&X{}yRtuGX+H-m7eR_7z#{%*f
zu0H4bqFWtSw|nXPg8Bjp6E%5&eud6rTgaYY6utY){6n`%>X3UEJy}x2Osqj14~%V}
z91_ubpB`WA(BU*MXpDDSu=pW&g`?A2%{rwo_ocDF!tik<ZGR^5nGEfeT#X}fVg;wj
z!5P{6&6c9Nb&ExR=1#yI93Wc@%NWtlo2g@i$V29yJH2Oo81q2aJp4(RU3vF}h4X`A
zRLFfrhpK5~gVTKqigg892Kq64tBsjNLmS%iw-zRABM4(s<Yv(J9U=@+MpdX+ViY}#
zo*sMQwEbd1=ocqXzWDr-W9z5W*kE|bSqG8D2Hbz4&NN$-3mhi_-IFsMC4Wq&PqScz
zgdt7xSLp|xu=5St8zQq=wzn1S$B~3$I}SM^bf%9$JJH}hI+%Re10E|W?+IFHChUez
zcFui3Lxp|dqG5VJz+}qq0a<5kHHUc6aTJ^9TjtLo!F1()P_V+FJtXWmAAyEleIG;=
zv$<Gk)*PH3PWvV_dUty)a~<wCuu4!l^MF%yQ9@;kS3yO#)Va_p`^Z*wRLSj=^c+Bc
z%(0^d=U0U`1ZsA7B*blZcU=bqM;>e)9siujP<o?~1;yt#@BpK8(#?jpb<3f5MnxS6
z@hj10c3w@Z4cYh3NXj~(HQmPYG#IezX-pRe#UtJtfg-sdXyNq2<R98S$F~mb_N`ep
zs$ZuiZI4oh>fUi1`)rI4ZSOJFM^-$4xo4g`QjcER`j9Sm={xt#VHiIP3EjgX?b~%)
zrdPa|ERPyDd;+@=Hz}y5XD>al6fb(4-f3&Q!69yoyXXpU$l*jKU89cdccpZ>)BO=H
zFomJ3a9LftICkxd1dsCw9!dO)d9>*p=C4%s{3d2HGj~8O^7482gEw`1`)%caAH2mo
zkmSzZ3V8&Y(TQ1fYX=^a2HkFh&Jg=ZlQpTQb+ZAjIf<v_d&IOAG|km1D5RD9WXnEv
zZ~9xuyqptNp~~CDfKz*$Fo1%?vOto-5(mfw0aw`Gv)}J@Py%=d*0#-m{OltG?4<U;
zu>>&K4o$D$Y^I<QrzbON9Xic_e&6wW?<uTEH#`;n!TWX75kFh<+zd@iO~c=LYW6yN
z-|2~;KW2(rWZ5-U@pI4V>J#F(n6miUd+AmOM~5;A(W%Xz+NJN)aFwp!km_Tp>hz~w
zI>?1sdS>K=6(oM+#DyDomW&Bk?h?!a7Lnb^*Sv+il7!ognxbRqZlk?_bN&+f-%l&k
zhh{<brbDxOoI`ZG*ZB7B;{NXcUN$m~7#1`FHf$+RYYYpZ)NIht$MNXmsrPJ3&xUqX
z>_!H}my1UH^X96r{L4ZiL1u6vRBTCRgAR|Q<K@DAy(7BN%D$IR-5tYc_;yK_#P!pe
zT|D2t(&vH>bB0T^3>dzD$mcV8vE>wgl3d*7%}^f_rc}cv9Lml)i%NR299X&NI@9T*
z+T3ByYW{Q5EyuD-zqMgXs<ZHhig<uLe8%-72|4z7!`=%WO1kD;u(BoVTK{yNxk=1b
zjrL6(6A&lef_4B-BaFBDby~QBq%x%GK3WMR0xn_Q-o1Pd>cgIY)OV%6uZYqJa2B`W
zHe5dE6SVK%d@mc8Z&z)*nJo%D1vABywt7!yEbrKzirx5a_l;;nR3@7Mxvlng!mbhB
z?SA4JyMJF-WeaGZSJm4Uv@H8|e@qt)2T-jr#JC2~Dku}rA#jFz%J7E=lJVJy?=`6F
zsVX?eCa310&h{RE^$kv9tNSn_%^NF;`2E<=$|(-AV-F4t(G8n7mqgubNa-DJ<LTL>
z4_nN<U~J6ms!y%UKtGBKsg`)lWG^cOWmQsOqG!<)Idp)(m-N>6b<!kz3nWCL@=~Y7
z3nZ&`PZm%RLaA}z!#8($)(#)x=#`dXmZyObwq69j9N?gTsMt@>8k)Cpu%W>2&OTu;
zoD2mj=j(2*P9IzMHi}GM4^}hGX&lh#D5O_Ew!{2-nJcY*V|&i{xu9~vfpzXd@vhVt
z%~>o@iG9nhsh>D2C!bIjUdSHh1&y%|E^4&>*gH^(cec$~XnMh%$ab5JzIzIj{O`eb
z2Hxy8Z&qV}OvfdW+-#<G$I4GI4&SS~{d{k@&age_^wX;qwiyct{P5{&^*Mz1obCmf
zV3A`2_vlXG$q#|fPI{PyBYf_Kpqv!}@j4J<?`7&#i6qIC5z}>Z<?JT8T_)W~?}YR;
zDShMLeRd6cQ}u<w99EbQyUwrlwY#`3CUhyx<QheP1-3O(=_$fa2uc};=XUno?HNo8
zx9|BXJ2?clYYP{1m!F+b!H5mY(^G@A&${)dCno4fAch->ddA<=D>2hgXE>ulXI{+Q
zlVmoUb`qyP&7fg_U)*FP^inY4=P;sl?HsuyvF4j04ai;^l5Q>~*%Z@cCl|4*i9s%t
z6{`Jzd||<9LS~NMv=cXFR}|zU8jZ#>U+bv_@9p`EAK#ttvC+1BhwaVztJh{B%0$a*
z8OH1#D0~kvo~FoDa(|Fx;e+Ee3G@rjKc2UDGrsK*?c&KqWhLL5vRs=bwB-u5`0f#R
zUsj^rp#;!_GBv|f%i|GX&hRDE(No$*v4GHjwJudz3eILvc~dZBXbts|3i6-_r}mM#
z?2+H+b{0K->2P!|FI`uM5cGJXEC>6&0Awl6?DKD)pM7T{kLGEUFJ@_dMPb!8oA@+D
z#=&4WnhNm*C6C3*PRx;+KErE?)$k*1c`|8km;YGa#rEuTp~{JPADCF@9fet_+C4;n
zX-3}*CRCzz-I53nR2B~R0og7XNe4eUD0b=S;sRza){r@Ia3m&Zj^no{@#W=3kFm`~
z)`9CV_n8TMtSp%E2l#H2Z$w|kZC0WvX8-D{*2>+1{lS$`rh{Lnn}ljlsoa>}hZl`A
zYHLX8b=tx-4}B@*MrMh5cXerIE?CZgYn*zR$xP-OG$0cXE12dtX~DZ3j=ZIEx)^U%
zsKTcp3+T+K`sqddW1OJO7Nevx3=7{B=Ps_+^z%p+$m@2Yx_GJWUR={vYlB(XXv|G^
z<IkqYcr=T|O(Vow%A}|iA``vq_QKS0IXa$T@SkDrYt70Qi98?OqN{%`iSng?Md?!w
zk;U%7(BLO<IPox|T`43mTF3(LmPOj&O+lFGjV|=~ztI<8O#gB;h1CNOi&2WG6m)z(
z4_-CB!(6+ud&)QH$al}^Lbg4$K8F{+)~l-%&3e1StJF!8l55K%Eu$5MZ}Z+D_BhQj
zFDzJU7`562?Ks5)$2$l~VU?<XV_sb9@{gMi)KsXlNzZIGGagWh*>1yokxk~z{^5<-
z=&XFnV0^;u_v7(nx(Re=DIMx|ViekbDZJ~jLol&Qi{_ma|0SqlIquah+03M|bMU@A
z2vEgS0kzykO-TiqkivA7aeO%P#1@TjNWWjR5)o4G{}8QRYg7^uTB(6p<BQ^MqHeQ`
zCpV}?#i()f&jtPm^jP1~Z=?(h6#bLT8b|Z;v<BN;kZs@JTCkD+Z(=OGdi55U%{S+7
z2W^I<CSWZ96X96kzSb@_V)A*~y(God9H!cHll~<c2DDVO#oEl19VQtGt!6>Mj&Qc_
zc9TCQ7Yy7a{msH&1bYl%=%$lmCNh7&L6ve~+nBD`4c|Z7AMEBngRK-ESJknC_4iX?
zRB={2)YgQvJj$#Xi=xXoYsvEkuIiJQGNLTv>yLCD{)yVqzvF*$`tGcll+^@nAe@yL
z<qZ=H=SuvGO{=(TfYE?A%B=^!WCIr#bgVexir(Qb3#3t+*dmqDB7;#RZ}@-zggjuJ
zA5J!Hy97QS##Zr*7kv*N<Y3r%_cQ}b;y0YaFQ&Ei^mu}{;c@J+?aKz!-{6LiPS-4R
zH3KtTwJRH)?LywsQ+(W1_VJHZcLE1YZJ5WVOQ@XA)_j*8#_71Kn6C4uo;tSYa<_eR
z!c75hXI=Me?H;eAY~rVG_kn*nolWO%$X7V>Wr%KkV%4_MWrFo^dQCnoJdTci4;RDv
z@>Es#k1rU8Gmh^)e2YtkAYjOG)Fzy9M~EJN<Q<l-Mx>%4do^U~?O|l{_dT+41><!n
zBxhG(3v{1?QAmCKGWzWJ7#9ou<k@NT=i_5muJl9T@K2xoc_{J1yHbBgz?k&=!Ln{*
zE!`#Dpw(@i?r6(R;vOm5VIEY<_DLEcSlu94u;{?uq1!^V52GK^E9Gsdq*v%&!=+K(
z*3IpCi<AG^Yy6$%VmRzr#vaXBoMN;90H4Z>Kw}x<H2I$123V13{cG7ree59{;-c+g
z-(hATBnLz5@dNYzLrQ-aH*@kQ6qYdh-Wn6YH}U9a?WZ8S?^Nh48H{Orpvzy<(PkIy
z2%cI6A7Q~zoT1w#>neq8$9)kyAiewmikrN`M<&Qh=eVN{X!4cLCnV%J<;auIKU23X
z)Z#{@cmpyX-`dd#C`RYqoD&n$Enb+KtKn@3+&;$y3QKV-!{LAEN@dE+QmT|Y)Y<Ji
z^dV;0F0a&C3{G>6ZjPqF>)C-Q<gHFwGnTh>H^!Naidi#_-iEWgm{!hYKnGvWjTbFY
zLWVSIx~nUeEQAPq%klD<Jd^icB2K&dAg^w4#!y|UY5J7q%O&cx1qO`50F=aEhI~6(
zp_>s(q4ILA(mGR%vO2#J+qvcAZQ&rK`+0wYAa<J9{%1dPyH{(c`R#vx*^Ye3`Qv}K
zi#7bP9FXEEt3+dRnU5)9Ate@bvV&j%^xk9KyLn}Mrv~-k(5J7e9P*RlDI5@IVQ?WI
z+f{ypf~4EX4Ys3`4JsUe+tRHo__+M2pCifyq81*f2CSAdtCp{WAK-fmp#)KM5K6&E
z**gq?00bzo6#}Lav9a(LU(=~CopSqaKbmwXX!{}TW}OxVNof$J0C1v^ixM5B{iXJ9
zkEYS{IUhits$B8Z{MbPz5T+cA7E=ASi8k2ZXHL0~?QfCRSGv1@>4#8vpY7FM3zuEp
z?J@j}uMOq!X->;a?HLv|4ie`kXRyP9ygM(d#r5F5I`fX!OuSNOOncR~w}~?_hAw|*
z+Y%+UZ%>-gaZG`Zr%Idxb4<k{2B2y>5YB4q<7%3iUU*jrExGeU;ADKdfjSE8{19S2
z(d&@+(WwvTf!A?=vUf-s`56<~p%!7^t}2m{F1*3`*+HW!;hN4aBc0_{SMBi0C|hOF
zOHX)(l3BM5Mrv*aOZ$FG@5W{ut6=^8cXie0<4*2ukFc*)sKhNF;=0cnihzI$NAwaw
z<tbd7VM3kT?*?Ywv8|h4ySpzlZ0vO&Xq!|X*fFX}T(U@i+2Ww<OQbn4#LPTScverO
zv>5wSuQ&HzOI<etD+(hjT|GtJ9jRJdq{U)JPZLh689Db4`&Y#e`1?FHBRrk$;m`_s
z{I0w%K}<$R@s_JNb*Y!<&;6qyK_%F_PSfeBe4g0-GVKQD9bc^Jz<_5#T4@T{blRJQ
zgA}^oWu<|C)V3eBA?^^UU&GxO*UNpNppk`8&=$%4viIHSv~utE_d2~<@qDuZi<xfy
zEAn;T_Q})~)h*enYH+s<oU;+KWfl9l&G{Gd&MA;2pgG*J%a3MzvrlZfl(~zdYHSU_
zwaw{X%AIxsDJ~8j{&X6JRZBj=7CIH_O*Y5G@X*_TZSd>zhtWH_!-LffzJFjLy;QcY
z5!yQ8+dr@v2px(h9Zt}g^DXMMOQ&)4FKzFcWq8y5L|uvy*puBoG4D4iy$QLc;Lb_5
z-IZa_tX>EK#e=w=;)oCiX;GPukgcpEWa^G;tnJy>n^hv~n^tgV%BiC+{$6=E+(9F_
znHOPy{FBJ=%v|qg?{VNpEPBEoMl#bi%!it4C(_5NP9-^q=N`%~pF}^FbP@4^C9Do;
zS{*3rTFGC}S_)&O)0uQf0XZI(hqEd`L{YN6Qo10l_I!3ZiT*}b8umDEuI(OJyaJuT
zA8f{r)jfbw$IGo<$4ti)?OY}u_*TSV#4NgHy5^Kzw<;w-WzhROjw<B<yXs&b0$Z|o
zbS~7uq%9~mDITmX7hq%({h5Rop&WVTc&*Sv=VQ#vu5fSnP~St0rqVqF+dTMxP)h>@
z6aWAK2mpMv)I#xqj;!^d007P50stHU9G82Z0Uv(}00XOM!%?ee!%=m33jhHG=mP)%
z1n2_*0NgzNSL3#_zw=k9@K`&$!N5YdTiP_2yUT7lW!XJIzi#0AII)trICidO18KMa
z`<od_wq!d2wtek+H>YJ|S(-1+j7FnTHa^=fM7BMQl5L*cu=ygNrpej{+j_diMrjx&
zlLLQN<m0WE*s``Z&e9q4{c({O8TWk_&E{#AvtTIFxX8ILpV!t@`!Epv#lC(DgPiBl
zjO*tp=UJYnvCs_(&$E$!NXzD|93j%tHP7{v35(ByDE5jxs-WG?Vh;dv?j6N^#*_R4
zzLzkABur=eX%^(u67QU6L7ry%c^Zgm91VX<mZBsOqbSnPMHb^*DH%zCmdew$wY5<k
z2*Hk%ae8!@XTb;nyXo*3KFT`>YYaZ?>+AS70}`S%VP!8yBfT~1WcJ{?hhRa*Sw7{m
zD;MxR$h&M3-SC73?EABmuI^1xA_Gb=OpBZb-$Wr#nF|={o9Iw03<kN6Ibg|Ck6nLE
zBhi2nC8M|q2``7vL9AdA;tF&g<uUKFpa9j%7|5LfrO+W02$|bJ1rxv=51|#92%`};
z2ibz%PNUHj-^0j+V9b&25C|k}Z=#sLPH&Sq4MLtt$aWi>XX%J5QME8MPlbqv@q&eF
z2&liwh{Q7nm`p50&S${YEKMdALmGbsXCU5DI^%i<k(iQ~2nd)JBG(d>VAD8GZxQ1-
z;&CVr<iIFAVCLrsA_e{=c?2CZk9vU?THW4&mKxI`JJCcO)Iq7~DH1M5!JLP(FX)X0
zQ1(UElVHa2HBSUEW3X3efYdXc6U(vUM#{Q^d{GKlzUi`%kAou4QSA^5a|?f*VtC88
zwkAB^lKnc8p=mK2CSdpq;F}_jNpoRrpeIfD0jmrqTyh6WJIW#=jfTEY_*NVxnik`=
z67O6pkS3fEks#8sp&+`OImkz^cTkB?wvsPo?$7Xf_wM!Uj*Z4wlYSbFt|gWXW_%T{
zI2{EgH}Lr|U5ID|DxhCjgXMpic^oVNSE)MZKfYlfsEuS;2|mkb^RSrB6_uhGlFo>O
zY7TXqG>%c4fO_Pj)XgbaeW`kb2;OF(6IV18UVlrkan^?LtV~+lU}<yRHP-Tpqib|N
zupT8aZn#hm*~|EU3NCUF9>Jq0hMG#{6w|bbLl(e90@Cl32v$4bBhP<8fwj?|7enn@
zNqpdYQcw~#G~-*Ktf)0ep^pyGkCG^lf;jq>yPeY2$ydq~L;H+_Cq(;5LZ%=D%>Uu=
z<oLBO{qBU{&86@@Wa%&fmqZSAQBf^ORzVn6{x6QeqX3=N0-@Mw;78XZMFxnU;<%?c
z&~$~CuL^`=HABD%tH*y0SJ27TC<2-P<NLGkj!)mRAO}efAtcZw1GR$ZNb4J*o|9q?
zv-BF4s^cT=ib2uQO+%;K0Ek0~pbnW7mAH=P)Re=#D&2zBv*Iyau%3i2o5ITR_t+`S
zoNU%4_Si*M@HNHS7{VHyJ>Pc)kH=kBjqc%CwOlKtuhIlmr9givd+dylIIOuyW$EvX
z;T{sH$0BU6h1l{P2+1Jat-f|7rT-v}lqyt`E{Z4zE#dAc7G1Oeo7##%vTPKloPI$f
zU{M8u=7lu<_{bt$=}X2v(2&s-F?p5FN~E|rCuuHay`0+HG`j{XLl8EdZ+Q-$LD>gU
zq9?136rw=AuV#PIut1cjjo`R^Ns|@9BnHU{95DtbxS~pD31L${>R32IM97&jeFv61
zDqq+>puG>xy{&xOI;HM4W>AHNg67YP<BYkCFrX;i09vqU>Ru~#N}>i6njWO2qOzu(
zbwjafKIgd%4GlY7rd<-VWw6zQVWM>bH>dav<Iu8)itB%5*oT&?{4K~yl+N9v1Y^oN
z!h0r%P^vzkj_w~Y47MKmqC|dt<ewhCJE}9OCM#de75|>^W0c_ggM+Q-R~GTK2Ku*r
zIEUD$qTf$5{jw`V0th*}sNtNP9T-5Cl}|Rez?D=t8TE@fxI)h43eTD*!_g@qloor*
zIPR0mX)k~2)D%?;%3}i>8X|el7La-B!y?6b;ug*(L@1lWVVlggdF)hx^Q>mb+PS2&
zd64@unDA@Cglmh?AoFg-=Mb~nqd}IiOn~Jc;g_z4J<apEKZ+ypJKnb!7axvjFu5PO
z&-G{^#1R6g!zL5Np{A%^zC?}c&|`)3eL~*djH`c?v>luMAcEN}ccDbFElLHoWCPYg
zZ0mHoxGC~L1U@<_w=;NtWw?&UEc;Sr{$!;LSPqdfcyvUe15uQqjO@DPKqcmmrL7pH
zf~2$yv<+u9btPeyA}GW@8Y#*jfO}Jp&P;f)i*cP-vcuK|ba1-NajLO17*H;=nKKBs
zem#HO`q$3Zm%g`k^=!NLd?}uO1^<txK?dHr=X%dNPS?WUF)eats|inJk_S-Eyh)Z8
zb9dLYy1I=S{T)gRRobJcb+~}|O!JJ3oof+L_w?R8$by-Y3hpv|+8)e!7{M0FN00&-
zB75)H#58PI&7(waOc(<9$FOHZi>xim%4B~Jb-YR%|1hwtz0Ygl1tq0Aqv<*98Lb}H
zK;#)f%T$>+e6de5=B_2rju&!jbtw7dv;`K1Oe;EQ-&R*kKxaYM!1e5cK7wnGgAsQc
zF@mdLkjYM08TkLfas#{OK<{M9zyq{+Y1*tMM6?A{_dE}^>cNh&s8*tt?9If)Cis5?
z5R+pw&baK_g^&OOfjguUg0$NxpE73<aay*ooK=C4rBZ`Uai!fbbyN|8q{5gBwXjw(
zi6S-%kzvGIk6>g%wwY$c6i9=<`XZ~A3u3PVfn*v88`Le)mvZrMqfm7%7qR<kdu^oh
z^oj?BRUt;5EL0HC@o}WsXpJ#t)*F9zE3-LSwi>`N!?cp5SF$5ggOn)r8}C#~0OR8-
zkGAn*=cRmqL7RUrGq$5N%lIf~H$hyK87PN*y~D0~6+lWRAx10W+?FoPQk^Beeo_yU
zhib?`A-O{LPt8u<;;POi6=-G_4bT9p*k<e%73gm<oz3Pcn3IqhJ(_xIt&)EVI;9($
zByU{2fBpUda2OrSL^&%HTd*wTI>lR(Y_klU{-(G-fF&=Bo%S>olP?d5xn(`T;$eCm
zB<%MG2o4i;xSO-32fi1RGTrKR6e=%Twooo^rF?bQ_KCK+)_s?=u4sbJ>RZKLG&Tvf
z7gkfVqL9?e=-UcH=CGGSq1S&lAZl76?P>uNJjg~<pVDti*mJb;3?p(CKB6ltlhjJ=
zV_22q9`5JidmKzeZ^ssU`TYaW&^?0u2?>)JSt?Uq^58~eDg-$RhX2Hr0uJ=v-q<}7
zc|x(Jbs0R2phK<E6q_in^U6WTu)Bd^10BaWh~gsSRWeX9rd>;>_5*(b`|<GX6q9C1
zMhVH@CZJegb3~pNI3lTl;O2Z3jTaSZ9Ly1==(S1+<~kBOFXNWtcR?!VjzfNrVjjxr
zl7Vh5KAfe)Ut}yT<?J?QlV)pGyo+99j@x%Nv>6ySkt_vo@Q9-h*bu_w>oqGnh8>j1
zVcpNXNC3)(Oe3>Ycyxad@EJJXBzJ5wXnmz!-sR5T-eBjez5Usykl$yx*x~yXOzQE}
z&Dty=l~&h{GW)PZ&D!?T3B(faM0#A5C4!Zk4FT){o{5<_tZn+WMjAa@H|(mPJ(zT`
z*1dg$TkVfk;G^O!6}v1KUL#;rVFV_yG09mtMGNS!nA1>;0vvxWN5hm5WS%S%0u3=?
zZ=?L%V(7KdWC)>3ypltk=Ck-O@M<S`1Y_apGT2HH^toW)Uc5VDIRGF{*5`}@mx?Hl
zxM+6-8Eqn^bS)NE6W`T-r)GN*#&@DU)dNybNLFr!J1rvHT{Y)rE?npX{&S+xKIXTc
zs?&b@_n$b=awvbf7zk`ay&Ax7gic+|a~vE)F<>yF?J7hW0!>LC9Al)){PUy3vsd5x
z-yQwzehlLH<6vj&%d5`8ZwPN87vy8*Sj*!tm7g@m(j@us4uA5`j?TY7xj3&Kw6tdQ
z{aJ9$eONRg-p4Jro!Qsof_U8RfB0JNg}(VX*aiBC$ftiuZ|4AC60zS5F2&{f)w2$2
zJEi$6`aba#r1kisMs;V|DaNu)FcE7Ysn!CCL~3sniyT+?wC#z!CI0F-A&92VEZF*{
zSVxg5_*Ixd62}}*>3Lm8HX0jz3B&G0ZH`5%1_mpU(`puyYfQN$DsmMr!RfNynsYZ`
zfd)JpoP2*}=;|t?O@KAnO1KiI?LINN%)aiio#y(!ME;SIf0<29irJ87;M&Qc?fGyM
zJ?FLD6J@p8H%&&GblktrZ0ayjz?yu=HY29Q#oR?&=sXnona!wEr4S6$Y7r=S*G@Bl
zNlMRbbbaL9C1&c#?qL`g_NyNeVoXdyWyDLdr{jN;*)c`RG}>BEXdX|~{CGB(<zPIN
zlevz?P5RK0&c<Q47*)Nns6Z6MSRLQ2cgnJ^pVJ~QhWz+wnXhlH+ad}Va;OJktXCak
zS2Y0OMWFfL?Hew4$vzBU^$+k|LG%x{w>$l%-R#c>!!a)S?Covl>Lcvtyqj2vflF`5
zw?BWv)8*~6ORv*cTtUNDl8(~!I^v!FB{I4FaXB>j;HXRRn!kXCE=HGL!t?FUJv37}
zOOQorA=oq(ImQ9t_bIz}8^z%$$U>aBFr5Xkqgcs}_q2bx@o<3=es$yp>nYZ?xiB1b
zFpMZQgX{B3gbznlAv77zG31sNf3TawGe>_u;;NGD_;u5L)bYcAHy=6JPWX{`?h!TO
zzS+jD9}SS-wpXM0_*fn{7#$4*hp>0YBLS-ai(?i^o1U$Zr~UxWTUvPg2mU;r7xPXZ
zb<>+q;ah!jH<ZjMF-W82n2*#QsevGXHa0HR7;X1&dUx<yVHO4*m;H~I8=v*>6D)tz
z^D0k4z#GQV4C`W9A1NTAY<%`0jG)3#9;Uz>ptB2IuUN`J3SILmdU-J(KY1|AH@($;
zR)Bbn)s>%(s?uib8`GO^Bf%I-z5lKq<tL$&C{Y~?*tNu<8n=j}MuW6OTp4gm6Nvjl
zFvcZ593$Vut$zhuzxrDc>OO)Dm^Xhq^^Nk+2oWqp7r?zL3|xw^@9uWIXOCxp1H+?R
z+-DR?s2og}@f%oRrF|gRGHKn8`G52}?4KN-zSYZ{L+cc*IJdXAo;iX2Y24$<lR3`>
z=I`KX_sJygeScmyO|S`M@53lR7+r_GT}L5!|MvJ)VJH#6o(V|Lg*QplNi2Vnob&7^
z8u62K5+!Qv;oBo~WD8}$ax`Ax2|*uo8W_&^wiRtew1RYc^wR}`g3yRK9bERGT$Ll-
z!M~s2-bVMmIC}+g)T?igPqDY75X#pZu(_s~t*niMNasHVgr2HS#1C$QD8^lnFGCI7
zH4mJ<mw2eW_XYmle}R9WKc|21pHn;i-QTD0^mmu~@9*JX>P|g&Y5eY<bF~DQKc^aT
zf1Gh{;FTB^_RkIM`v&&CI`-vwWwy4gIYt(LD^(5q9RQ@s+{jNO2xy$m_pQd4Esa20
zvvIF&_#O?H=N?^{8n8RqkLLxJKf*>JadLJ|jzQImLJ&g5rSIPV&}DyzKOCbozl8-G
zO{apoPvqb7;mpJZqH1gXytVa3t@U?PY+*EO6&rzizQ1#~zq4aPBVhf0-xAzjTO-M6
zcYptG_lp-zG4H+DxqGp{(-id=&v)*2pTD%;?CmcZvPWd@zudWd`FYC>?e4&v-7lZ-
zH0Nn==ZojS+m{w;QrLejc%-~r@JNxj(JaMYr$8lI<iRW@WlMwpw9LhyRKE#*)OO{A
zIvIfuC0;_QcWri6>H`WtyZH8pqqBd$KR;TMK2f{XIDoC{Lk?8&tP?vI_nxSpt3y29
z+i4!@#^~iAgcf>&t6hx{){Kcf>TW?LZB7>&=<WJLj<yXd%A|jST+tP@$DMUsLsy@8
zSBGJLGU0TT_y?#r$3KLH@@B%{LjS8NA6?U#on9uZs}hf+UZrYk#CH)KSx{HyeE;I<
zC<!Ddvnjkrab<(R`*|&ewUk>QomD%lO+njyDG1<(*SqW9FDZBd1m;?xy=%S@ZpX-T
zwaVkLiHzhx1h;<_nra*^GBAo{nl(BP)m43%RL!jd`KWrG#^}|WS|s%turX~bR8x`6
zwL;>}w0nttBab&W<*HJmuSt8UzTxF1+a1)e1cX@i&#g*z8M+lhww_(Q`+#*Oc{)nt
z)dW-55=zK(j_f-1TxTW3QkRy`zF8{0)Xz0brghz6Z%KbG^O<rodb1;Mh8bHVyj~mT
zFlTB*hDYo@G?*thz0I0O(?}a7nPdgg3Nmy_wXUUBC8K4%qpULhJ4&o|!J~PCTl1*K
zRX8b;T1|AfcA{<h9eGkTj*<{I5N_sR{<60YyTkRXr=9h|zrVS9MgS;4*S~#H4%mKq
zdx?pHb*EcObva~zy#3+2@nH9=%i_V#Rfl|4tZpJ0e$@;R>4uB>c9}rbHOZ8V<kG<_
z(l_Q>SLa$E2AGfygP<y%$5}*;+3J{~4NOx@S6NiG9xYpmS}s`V;fmyH8N_e$e0sHm
zEWe+r@(L_suX?zorv8iz0c&>+L8leaDjs!wv0~va;i$xaWZit{SZ<EStE!cMvoys)
zSg<=P!P;+8BI+#($9{VQuS&}7SL0a@FV$7y^40GZwX#fmS^g%NM}}9!YHZ2fO80fI
z=fb_yf1%f^TXE$dmLYr{h16xtVV_vX<+MZ9qvgArUN;RbDDo8Jl#xFQ<}E=}SI4)u
zdHu*)!nsU;Ug{F!dI6{cDCCBgvbYi590?MW-yoY5crHkr)Hwnz<gHYF>uX5>y7Jyb
zDJ8NS+pDYcj)y{!-JN@a*%LgQOJq$}DbrE}?Q46Z(k*koTIBK;xkm-i$fx_8_q6|e
z=PRe0ipmNy@L^&?S&>&*e?qGVu7E8>5N-MdT?^8G)W#Jx6H=)Lu0m+3-c{w=<OTSW
z^j`!7&nR2r8Z{8Rt8R;dHV9R0T0$BW4mLYiR!_y2FFh1?fPQ%{uxi-ui5^Pi0o@AG
z{d7~U>nAy9{AdU5@T8@x*U4A<Z|WjlaiySbP_Q7lB)z2rs9=ah1R|?Z@q4H4hSky~
z#b*kC^BHrVw77CLtYJ6RZ37dg=|RAe8C%~L>u4>#8??HxtNIG?9bdHEFuF%!D?g}q
zQ1yVLea~1ADpdV29gqk05M!`9#<*$){D%XvKf%9&CooHfEt0y#Waoe`P-Niy@giV~
zzh5F3TyXGV6k_6jqV86-q!ij`%EAKy_7Wd|bKi|O+w-^IbNi#K2$dP3r8lDgrFa~D
zmL<nGie9Up%kwFCZORaXxw*K9kW$Enlca1Er+8t*SQqddQWZZ|l|VZG+gLN&j7c#e
zf3u{1>$5U0D~_*bTR&ZS%E9V*ub#fD2h*A{jiJr7VKexb?1BF4O|MGR_1K_l#<1dl
zfqM8iXoy9)7tHZOMY&Y9ZK;)@Dm|Lx{X=~~tMo@b8R|p5T&GHEsxAk{^rEEhP(=Ej
z9eYR!o8mG*xXke%yq*Y#=icc;!uq)_=@Yzl#_<h`!t7NYQ<lB(ys^{i)N62;|NKLX
zvt1P+R;~blWJX<rD1Rr0c!XWRZJLFD_4W$BUxr{yY^XxAftQSR_7-!Cuwy{p!wNl~
z?3Fk{`X2ov9$kAinQWk!hN{8n1kCa~69&HxSc3{qJ=iRT(K1NCH3rH18%t>=-X3$c
zTir~faZ)m{<2HV|CC!AYX|;+fS}))btflg4krUl7+qr7(W=Mv*J#ThvuA%yWBMVa{
zQ>Xa%14i$%G}Spxna!X&dZG30K2~+o;rPAEFSH_QPb1*x{*q(dXkd^mXJbh^p(+2m
z`E9Us<GA<H8Ei?Xt}R-Xj5cy&bP{co_NWB)-;mmQ%vB@v{QN=l(wq}p_ppRn{y^JY
zUSj0d1<le@9^`KyIkUQ`YsJ)m(h)!XTEzdulZMAktW2&EyL4^nlO}f(#CMG~O-$=e
zRPbS^d?=BM7cbq+`8rDGMeaT8%ae<@hbKR+!xP;d?XA1;l@2Rl%<K0ThbJen;#bpT
zUw=VFrc4xp)L#8A$tMy9oc&pJNg*vGYlEFo{Ycp$!J@U`#MlK``cpW6_=_4*QeaBF
zSXvb+q6z3iUmg{!U$F3HV=vDN{$$Jp{88Q>wn2>K|8IM5zTdWy?2rCGPk}>^1k&ar
z%4-x;I*KL7O1vFQb}}O>{D34VA|eTfAhjg+?@8`U+{e0aajUBLMgs(CIdSH9I1`Hm
zda2&3tE;QN)m_Zz@r2obC3fB)Z%*QI+=c!@5v0|0Xnhv`A%f^46?`$f(%>jV5*#4c
z>$kWSqEbifiOxjKjuY0(Gx8CjhTou$*pavc>+%Vu`2YT&|JRwKdoEz8DA}(G2$Kn}
zm%cN35A8Ib#gj=$eMxz;6Ndc*=0CuQ<hER{<oyX2nq3^s9e4?U|GHjIlliOUoHB1Y
zbI-=dLtqe;!aaIP*gJ_Yvg^l*al{HZhOcP9<VuDLyW{zIno|(7on6`?3z%&*o&5XG
z_e1uQrl*$RleV6qM=!$}NzCt0p4Xmuce^1hr|_;PwVSXd^#T;9-x7AUw_U&X6ZKjv
ztT&7=5Xy7nEg0N?r(>PUtJ4d|$<2PmE>$n8mxQ}#uJw8Z3k}sERwV>svrJ*D%BsjZ
zZQ!3Z_t2_FM^RCU%;(G<g%u)vu5N3~#SeMvsw;-_UQ~ICJ%JbEcfF|JDxC*Wz=*=<
z$jS|Ba?U-nWd%1|W)8UHbp!2?M*h0|u#+j<il1Mn5Fy%swU%7?Okv%~r)V?CmyhGJ
zL72Y#%q7Tgxb7VI&c=~+D5*fpaU}n=<~ra;{?#Q5P_!rS?nhrDDbOIZ6Y=$Jua!UX
zN_iP%f`)#SIj7Q!&Zw{OfR;K@=NKFt0^7QN)KIb@-9Ts)+B(xOdOM|w6X{lg4!tAD
z0hSV-n~3dy9>o*DFLYfa91Cg0$VCfJI^>TcF>4uMj96>zJ`AM2nWJYpcNH20>8(;V
z8}Ebw(M&8{aw=ZMIoH<Yw?#Y;t((>Qh@mY2OK|o%qfnbM3XhicAo>Fvrp`+Jn=*h&
z88@f3;!7Gweu8^w{-%5VYz~8@!^R>+S0wHBd}S_wW%PfF<(TadNQ+;)-eM{LaNE1*
zvyxPIKu0*b$6}vV&j-;L1W3P<p_mnBwTOm4m1y|cYiQ;tR|DFo&g*ygN@X%VYPTEk
zfA~!k2L9I2!MF}7>Evn9>EvH-@kJr_a}pL^sK4DV*Aep*zQMu3%P?I?D<F;1l(&Yx
zt_)y*wDh8KyA7mQ7D~$DhykL4w0!DshOl<fpSX|bG>?Aw!#SKl$=shmxePC3!bd&J
z0{!9*C}%EOyc1K&_cjG>u!><FPm>mhj@b;+J1_0dXMYU4W8Nf7Xi9fYwjF}ml4^$O
zTh@nAGf9J9EWH5Q%(f0QoG!AOrMM1p_s(yBNjcr-lw9x65$~U_p;hX%gw9~G9Qkln
z<mAPakTr>ySCPSzgePqhU<3}s$eG~fc<E<oP;08%o$CFJT1wQMlG>q-hOq&_<+7Nd
zJ0ZGWQLP5YB4-n3A~gf19E?gle2>v@H`886fIwoRPB)l!(D>2nAi8++S%jXY-JI2b
zDw7U=-#<lGp)BAT@yGK(c>`Fep-xtl;V@zODX_vtU=*VxKCfwMa4=B`flsAG7x^~h
zoX<p|&UCcUm(8i5l1(p1lXQW?tcgiWrt&tuQ#~C;lM4*HHAbxYRtcyodBa&aXq7T7
zX3Kj!c<%k!8wQg}II1+rJpEp!wEG!<+n9@VW7o>rwAO)JQ-j6h>6o+81c)SdLnxBB
zHd)xRW`){QMc5atG*39nr9DloP)BBy#5+2NFKgO5z06xF>Tzxzib6z>GKZO(z`UrZ
z0gMz!9q0uUVIvyNmlU~LCmKDHvF0dFlR6!!h)xAo8>*_p_^O7<LHye-!#|aOr;P+!
zms&mgZA9^fWYXh%K67!ClHqBiT00}j&`Sx1^<+AV=4DH8F-u%b=tzU2H$EwAZ;igv
z;$>OWTYCfLp(GYESS66o@i@miNF^%eag|h&F1BLUsA2o1UgzR|_B$^A3A=UR@k=F*
zXo0KFy?5^A3HiJ&yn?jM;A(|`z)VCW#WK(75b@nj$t=|O^)XD_Tv=WtsYAjt)5fbA
z>@>Rlzst}gU5R<*mzGxP6NND=lutvd@nodMt<FrNR(WKUxm^|><Xz88aGP-4@qP;N
zu+DTfWXqPa8g^#PYJIl5gF1QS%&&CIsigIquexBly$pURqK)fCwh4@X>_drs8Y-0-
z(Vv+uYp0@Is{NJX(>555K6lcZCY<h>D_w_WO0^;1&SYL=BHw;*qBZ2Q?sS{7X1WEs
z`_sOsK%r+8PR{4UP7(rt?k8VR>gg|L%z4@+^oW-ZnI}K>O5%*w*0gYlXHHxAr0{#I
z%1v|@PnCZV_8<`t(kh^Th&1hjt_dw@c+BmMh0B;Rv^~6PcN}7MN+}HzT2Ju6nILyA
z`{)qv+cQu*Ynl$@t^vh>27IatuMcOn70VP7BLsBV1yjE>7?Vmf491;tFm*KrM%3u|
zoZM#>Z@1+&Q;K?giC1!;QmhAG;uSEKV%D8s5%nfXj(*kdMNz4LdBrUiM0%A@n@m5N
z2j|oivJ}!w(1}HRnqjLcBlKVzY6X!?4nk=Az|3QK3?7~co>QW%T@N{i4+}Ev-r&9_
z$e=I}-aS~*o9bl?Em*Qh!`?ep^-@c*;xk6`KkNa5iQge8z+#JprA>KW<HrRM#!AeG
z^IFE)wX`C?<nP3P-Lh&)iNbnoH4%db-+Vbqw<bLqWENVXsSKy7Cv`YnhSz1is>+Tz
zCET*hlA6pCt|1ldB05alwQXY0ih@457V{mZ&6(X{$QbP6DhQ%n9KLuGdrY?|s0nmO
z0LBf{b{R`|dNmKM*dyY@N-$j%QcX!W!nCR2)My(CzwxDifEC*I0_(F2Vzi%iATk)a
zO$IrZh?VPj3G5xVz3=c+PY2{vGsYwliMfhrYg6MK^jb5I#;h!iaw4V>=`a<`ehu%I
z2&LOGtd0f@6k!97K7m&Esda#)?2ksCbImwc-5u|kIeqm+raGP-rcv)g3rj)K<C9U4
z3~L0c8qNuSVIR&_lb#lPL>2lRFQ~2q#C@*PZq#gSH|{qc_&B>1csA`w+dlgXFEPlu
zTs-LW%m{%b=}J_eCZt&qzr>E(aM%;2y4FxTi4TS8rvBCHaPN~iwgZj5WFN^KqdL=u
zbvDo?9Q$Kl;%JpyrDjI2qz-d)FEtu`hMLGgxuF|>GWj8jZ6245+|rK`A5D58s!JPP
zt*oqlEY~y6=!tU*xsfbIP@<56nCKom`pTj3rE-X+($7k6@J|0LlK|$5^$$mJQbzax
z*!twvyg!<s{5r>c(#z1caQ2(_eyi$Ymy_y~;~8er&z!`}2Jp@o?sE={w5ab3c<Su?
zR$rNatXTu)O3N=9BzTE<kdwAUWTdV`GINmT6)Dnwy8Q0xdSFM9O%EIi@gmS@U(R@m
z)7Xw@P7qi9@(w_!K|FM1&=ixHC8e!&WKqToREhfONUQV@8a?u%iym>D-_iO;JT#JP
zI0Y&w49-0nBj8my4|>6%V2}X$ns~*S=go_M65w;hf5Q7m0Q9RXMB*1&+sbYU@9r9B
z_bT#l{VvBKJ~%pl_HzH|)!{n}JVz&>F$l)d=%zD>M)Ppi;8~v^y*xR5*LnNq_#`)P
zgJf)kR^KD%42*l*VnuDD_IwtPM&ayx)M>Zy#*r1T!>Ee#{B$76D1p8PE-}l-_*7|s
zaG+*Uco{5%a%0KxdRk@nSMWx?<MARwtpHsOvVz&F8li#)sal^{;bv7w{0CT$&C)%*
z+-%Hk!mLVgTQd7>wjvWrxo9AZ!SnZyS9->sd0@vp^2c6$2_4;hQnKJp)|>{~YFlQ(
zY&GYW44MD4X3uJN%PZLPM(muBU=$gDhEBmW{KBYLW+`Iq{ACQBEmg3?2t!{lWNjpb
zckCntKgGsyiz%Z)+ny1?{Fj?~tz5rb`KPY|S=epbUK-%<l=^0&g2`r~yeT7HY%CQn
z8wCO*N5S@WC=iWR8z!i_rhl-QQN#?Ft<q8>zs!x1?aUiVL$?~%`^ho8*u)TjMlCqe
zwAqzs2JJMRN>4@2_hww#q)6;grHotP0QOpHsUQl;zih~Y-BQz3KjVlpPiyyg&ddXV
z>Z|j9@MCy`1j_TFNmxd!S-uw8Y;3xkY&F|`hLwjojobnn4o-BIRg|lAe<RuJbRc7=
z!zX@OR9NNZ<sFM1-N5Y}2B<=Rxr9+FdD4OQE_0w=uTKh_O%eS#+3j1kV1_PwjvI!Y
z>4OR#D)1nf2kIt}74-#(baZG*3`l9GgrN?_&t2pV)TdR}wHM(HPXga|MQ?LjPqk*G
z?NGmb?`tbr4-_aK`?%l3X{-l4uib8-&d}CsEnCUyv<0TPP7B7^*5y5a=Nga3m8Zlu
z(Jre3(%J{C>S=>LTf*cD@4o4N%{K!yW^l^&E3ZXN0tEq;@hm6);_bA+Wlm|0m$HA&
z5Gll-Zi(0qwT#^fmf)RI(ZGXcS+X=Wh4M)q3|;)1DT*jwa+EIF^?y1O>VyN>#BRnh
z?&0fb6twKw`h=)rMiEqhT7|yK-maepwcpSF*sk9H#IVDKULk#S*RuKl_X^ZP%ir-U
z+AaDX{jziG+>yc%-JYzvmM5Bc8T9*|VTg;R>~rh})FJDdp;kN?XdGdwXv~ZLT4dgK
zau;@5Pl`S0S_B)!mt&`{jaILH1$o)HXt+SJ7agsJeT-3r6~It`4f~I`<@ewCm!!|X
zjKmkoOKra^XfrQZ(wG)9#=&)mUo1icAbl!DO566HoLAX$y3l1+@iFg{3SaDGS>v<*
z&YK7&v=q@>au1jz%4GsSMs~n$&j=@lx+JuMf2~Ux)0WrCn}aty!c7MOGGZDB*5oKm
za(jrGL!&E6IGf*pv`X>$4lPRE9vXyLCQ<RQ243SYldTLZYF%FB+0vb@#2Dk!HT5=0
zN2xy*GA5e)vq<mIL06M-cKS=ZbarpA1U>Q;{yq&n3iYtJTV@X`PaDCm1MwjDbH_mR
z1`ZtcqJScpWSh=9KQOAW-6PQ-gNg@|<SZPuO3X2-L|zPkl1l$<H6o>}0}me|<*<y7
zFY0IUv>#oH9;R8w3+|&Lb52i6Y-;fLrCkcX39`$lN-h2M!6#@Ol(}r8E_ZGcEzl<0
z{R(mw9ZA6ay`GR<KqxLM1m_FAsn*@D1Z+yYT;zQ_jOTH8f!f}6B5hAqcJm%ha6iM^
zToZ<`0=~6>-AUiFq3ny3g`!?-HL&Zp`rXs^UVHPbx0s<*B%a9H^}*<}B7%BKeQGrK
zVIqYXI3SqaATPTJLl#dU9`$)kuuVL=fLmZ+iL`RrB(CzYQ$aQz^>;zeY1i>P)Vo_o
zf<Kl)_^dcYdadB~Mf$&w^p1-zmB>(GXlZeNSN=qQ<+9;6zFc~)-kSI<{a%>KRrkgl
z@m&Zf@UOvTz^?E;Jl47pUL+5H+illTrPZz<4})3QZ`Y@bdApo`{Oa)FXus;=B*kzI
zq#bK7)pod9HFsophkfvWC7)|I*8go-l#s18oQ?TtzDHR*->!t`Q1HCV4Mp%T0=m`7
z2@ywsTgGE3^Ws1Pr}=h(S<c{;I-d<NQQ6=4*T#6GZ|gy*8ja8MVz^K0=USQQwj-is
zeB|h!PsRlfpNV1uV!UJ);2bUgF>10F`^70#JA^plF-<ZMHyfGtgv&HA^*uQ9hArXr
zN3yL!q7->V$gJhZ4qYk9^1!m<DoSl9{JIr?l~(^c)q&eN`pvIWJ@|F01G#YYuK><$
z>xf0{<XykP{8BE$T$XgfMGKa^&C&}|Vz0&phjt&?a2@?@SCR()y8Ygqq3-Qoq*-+X
zmaSvu>Z|)-?~mGbWZrPX>V@rc=@n{!N^R)8XK!Ag9KJs3934O&Iyg_ls}4$KsBbWT
zb~vL)uoBTDp3x;jz-7`V^UN}k!4k4wMg?3n)8L$`WQD9Fra&)F#&J^b_L8}pHjNqV
zCXLJO#x$NL-;Tx!8obP)D_YHO<Ha0yaW(;4<5`k?+v~QzdHmo(^P5MHw!S3<o^NOU
z7PK=_C4P$@GNeGbhFtybx1;mckI#aC?W1UT{r&yl4j*k@{?FqV^PBfKPN1zq1Li>6
zvBLi3px{vESxBqH7ySC^f3;7a9qga%w@;PMgM^c__SvrB*7duYC+4S`y`x>4{z@y;
ze4Je}X&85=i!R}=qzVTH#!7|bJ}=qIvAl4u@M)7ab1`m}2!W#1#`(szK)8#4v&+mj
zzgm8||L*(4lNaw_eOJmX-}g<Is=~4<e#Qc;NG(A=cvZDED<r{>4h#kw@PR1l^RHD6
zYjM2<YkQ^oECrP#z3I|#=F+WdGr>60b>UNYU0j)AR)X7KPHIB{r7s{~PD#F0>8hT;
zF>Y-18yo-fT>>Z-?__PyDRvxx9M$RUjFDvEyvh-XEXh5Yug=dnSVfEZ05}*L<6$P-
z>t40)KYsXa{|07x)MHuUG4Q@8_aAOIn_CaRork?)3(nV7p!ToffTBOLIUrZkL6^DN
z;27<&=4q9%#tzi#hc9gH!{##|vS;{hH`kWLFrT67#I!@X&XpF+OeMyDhiPXvPXBE!
zX-1KGLBWnC6djj`d<ndMG#G@actPv#VcaLvqzOB#u$>%4r)Xkr$D??1&ZQI-dU?Z`
zfZ*Dm%$ZIU)_+@svm0%uUp5=S;YU%gar4_~@3+?R;mgBkCmY*acj3S1@7}zEKP)v3
zYMqtB=5qq?etL2E?hyWeM*WTZ2OHblcwZvuyay|KdmCQvzlK*^Y{{@>Fy%M4pTXqU
z6Nu>AED5?`LmH&sNHm%yEWwjU-~YOG`7Aj8{X^$yF?ck5eKQ(t>uG}?Yz&;R`%k_x
zi;4QYBI9u~6&W6Xv;BArK2807JQz&E`Bgl-V2K9k>8dA4qIt-FUPYHT`U}K1U@yW+
z?ftR-3eK6pp4m(DjbJG3-2|p6zP<n5W;nTQL3i~xIUhnj#bz%Ujk-baq6GqG{cv(w
z|Ka$}>$mlrMbx?9dD3a(|L#9}g8zRE|7~^d-+$<T5t)gpO^l-P5&4;?WMPw3#K<g{
z(;6MO1zcKtBkHk#bvAg5%5@2}2K=*A^dTcZ4f;;=`%U-Pv@1B9?L;`n;pCG)ozCDs
zEY<0PF`jILCYf#+Cvz(uU9E{~@rSb<DK0<)rGr^6C$QwYfZ!}$2nf7%shV-tpe}2G
z?HyUTUFyVwvxuATrL}1E@inigOc+dlvd_5QP|46Sh{i>KCdMCGsriXL*K3NUG+mYo
zcr5V>H1(F&$(fBbbPSrnl#!k(6Ue78BZ=;LKspvVAVQ<?0YtC0GzyoP(Y|8Qv<Y;?
z4|xT6AkVnX0LcJ%gI&`Ci5Ve%WR6}HyxXAqc=BENud>$`n1$^B#5R(T!Kk|!R~pPX
z=_5Qx4nu{1!r_NdwL7Tg+oh=@fVMlsQPSS)jiM<c#S#Tfs@&r_;b2bJg)Vz1qHMV$
z1OswiF(=2?$!rliuSt#${&v?STE(b%*Wc5~+8*Q<=r%{_u<4g44mD(c1s!&+@|Tn@
zXDQVqh38wlty#gmO2n;9=EA5dSwZQPZ#VTizCvGr^lo?_O?0HyAo0eF-q3JmhGz_D
zkzWT5gE>6V3KL#d{W=`D@TLcnOEvK@Gzx=Ok6EZN5J?n=(A+HVFm#&$jaAK(Vwj3^
ztR2--j7{+Pu07>Uzf=R@SVR=xorU>EX+x$+xkAZU;JlZGl^*U^?A?xEIc+Ko9N?HC
zMa{H-B?4DearVr1g-RJ%DIv)}LBjEZhO9cE9k7Vmv>XQPTI)Qz3@6@Ayui3@<0wgZ
zpf-Zm#we@!6SMUxj8R>as|ufaP8g!y5QxBP@!T|n9RsZ}@l6kJrm;>%Ue$!dE7fX*
z78A}&UhL9Htc0JOK7x@fnDBT`?ipYpqM6cv6A#<YZg@&(rzK{^bml6@FhN|7%j+tg
ztbp(fYQaip895nm&CZP2y=NM5#A&U0hLdMW4(6S2qrD#*M_DPP*!^Wdt=fGiUb_2@
zmAocUD6ooI)LqQOa!G&}m#SXL#0@Qm80xHU1(Z-{ehW&|oGT`T>{<p+bQn;u&6w?f
z88U^iubbMn`)ImU!@t|=@5z_2Vrnk2V!CgvL6ej3$W<%0N(s)z4HNhUwc_~1NBN8s
z;2&@X^JPZuL?W}hv&h%}SrHP2zhgh_5plv6)V1^9;hQ7WnnbwvdEzRvzzHiP;XmfV
z1#En9#J>TaIXTzLK)t5d#beVv7Vsv2bYUaYu!0DV#UE}zdVpq~Xd+&zdS(A|N<l7o
zfMxvsNdDG<NFoMX`3D*V!-?VajI4mgH_}GY4|QBX37=~2P-H;<U75f{qJ^pLOyy#U
zEXF~!l+M!JA6>S9y1Y!Ms%%FAvZZJP?VkxakaA8l$U@q-`oIqZ@JnuqoTNB^IY|aN
zSy-yP1~!BRD=E?*bTzXq%++GR=o@`zN$$(}Q%TJP1f*j0@vMkj=t3=H>_?f9o0)(V
zSBPbniLF&v76-~-+Kwxot(nf2vR>avc064ZwvkXl9;{vX%1iUj$RQfJ^#zR5tSp1W
z^9+u0U(*WywSaqLy+hbVS;pmmA`}2SY1h?x$v7X&qwz+9;EMPMSu6hK)Z<si5$Gl2
zeT?JyJQ&6Cv@?o(!Dy3YRQisLv{mxj&MCd2^%~SwRXpD6FlVQJl?Js1v48$D^IKNh
z3OGJ1VEAa(EBmu~cygJoYzp9geST>iFy)c55F!p!+O+FfQX7gw?a*L<_OTyU*sVyS
zP;g!zeK?e55L`{|lpE5bX!uVq<LQbXPM@agq43P|jP9~|xt&O02G)zi9>ruGN}2Gt
z$LyFnwKGdYS@ITqKo@o@bd}nd84+>5BCHGLWH^TbO3wu}IXDlm`Jd;b;2fkvQskY$
z$_^84uIL&db7mR!r_T(3tO8-+Aga+00POPQk58s8(XFcqQA{XDRdng9Vs}?iSkT&<
z5)rgRL{wSvau&-kpjDu&ARy?>Jkvz!je<g-$<gGD=Vf#m9@EW@b9pnyhgW_cA^RFf
z^ap452++_P^TbwOntXQVC$k>IU|<XkM6HX?uL;GM`!%UpdjBnd^XF-*Gs?{>+urap
zZ`nM@k7|K>PAeWSvC>V}<TM$XQe8}A1U3?jWC<CtM%=lLS)L%;EM2!(QWJVfcM1~h
zJ}oC5WaYp%f&`?xWZpRs<LB=V4m(OoOhbCl^XYyP|Akkn?)1yNWisAhW<tE=W(lD<
zYuKbAQla6a>#eGP$3n?3MtwTdn<rRIq7P~McBs5v6{vbN7cxttBumP=FDF$lRfMFy
z!aUQ(R1~@!eD+w=qUxy!%=V*_TE%Zii{$aT%lQ5+EMtb1$qeDf@nsaEcOBI_3kT2P
z@bG>%YSC;u{iW@fHqY+<HEUa2wX3YmTUwY;*+ju{Xq7g9l9ET3_g1$^BPa@n@fm&e
zOwLofw`P^^dh_7?PwBGT?K2EN#qea%Z8M7vJ;fO)LQ~QA%#?WE_&`0vldA`rd9WA6
zd9XC>mqb>y2m36Z7xkAh7Zt%VhvBgUQy#yg6b0BF9nq?J1YwFL1wGLC=I66bTb#*0
z_TooP$0+50^P?Yt@w3Xb&ORrz@e^mL{FAR8icHnQEKu!gw3D#eIBrY3#0f+@BQq*d
z!D^+^0C8EG*ZX!73#Hcd5&5)=r-XLjZtq5#f~xr4sBz86ZkWaSSG!Tpz9671X^A*B
zA1&zqv_!7+I>+8@9w&qX2Qw)x!uR1MoJGBCH3Cn60NY41ug$^)u?|*KDF`Kje)vCU
zr8@9v?ejPrP}!?4d|Gir7kLeAT5d>@8-4+$*R1$TOXC_rt`X`j(S#5$jCs>CI!U{`
zO%|wQlLJttx+<B932P#1eKv?kqxcHr0tgKw?=>`$F_(hsGzWR;H<g0*)!VMCZBIpA
zXWnyvQNJgip|XK7SwVufUa_}rN>fy7Uf#Hn0$Fs`8$}l(S*9dIjE@W5msIIyAxJ_o
z%kknIs;2d>H;GO97h%vBs-j#)k~JAfFA>f=%=*~IEVdR}>BZsxLFpfM)$s5gZ_0~c
z(jUPTaFc-QA1kF*ncgt`951@85^TLAJC4eK7%$JDPtl+{UYGzETDN$x6k=xn5X!C(
z`-UZziv%_+xtY;S!{0?!%5PO~FlARLRd1Y}S3PdILo?N!0^-B(!04Xj{uW3t_qVq3
zpZoaFV@C%3u+o6hurCV81w(oI8{1osSP?uQrwF|-BAA`!i|Qo{zyV))h=@8*Q^#9>
z<)rK`D*Zw9pE|4))T;{&#d3*XLDyIaGQ61=Pf+V$a$IBJhfgtm9<!zvMit?0XsIR_
z&dTr}aCAxQ(84akRbf3ZJA|~1&th^3Ja7=9o|0MfrE+Fp+A(#OOS?)6RMKt<5(kSB
z#UQ}zBG)t=Oe?7c3x&L*_5PgOiD#^T*YAM+6kDgFAY6~%AD-aMK(-(Rh5)%KthMrk
zt%p>-#{&oEm5B?F6hSOxC1hQ`>>R6SFtyd!+MC`ePQtPkF9JVbY~9?}n&In(HZ<7y
zQM*n$`}|u3$E+zjqt)j#7Ml9p{*qIl^x`uNP`+D4aFBwn!owcZAd%S5%xjT<H}v{|
zc7j+3(jSl|9NUl(V=a64<EzP5YwAf3!a=YY&1sd3<e6QNUdmP1ew<xEHXTI3TZS_e
z=TdykYq5+RSZ^|nKrR-DzveA$GL0t*UTp9jG>`R-0QXVDT1n<v^LD}$4o1GNrn%x;
z8c{mVM_MDsIf>G=L~<uKIZ54rDwBQ;a|pIlp0rBg&sRAXsx~k|*OW_bGjP#_Oh|P>
zSqhJG*}*e)Ke?{Ye6f4FJ6;k^dLc77jhL{gAMT8EAV%}Da#KFqWmxVRP*+7TrqRgj
zhWr7ry;->8JagZItpKP)4%f_ZHi=P2$0JD?BsV+W4?lTtNuPi$=lG|8=Wn{d!XV4M
z4#PnO(deDF{n8ovK5SMvYKf&=UJ*wGxatf~R7|{PRl*SmW(OM8cB8qG+CkjuFZ;EB
zbF3<KuRx+cCcvb7VdaYxhrP)=^M_-P9Sv6^7^&T9CwEUvZU5}EsXbxXU!--OUlEKm
z90h0ngk3={A&l9FLKg~uD8`Sd$gQ+Wyr><1XT$u>N|=%h8y^YMz0y*kmbU^Xvvfh+
z@s1GqY#Jv?MECk=^iKgC2mG{l9$ssq=<jgnp?=ltF6NwlQ7}F)98c#r9$n$?*hDL9
z)i2=tvRuKR0Y|TK)Htc*1y#KZayO1ySr%<{7BV6q6$pdHL`>p;QN+57PL?Q-+{6o8
zU8ozjiK>W!!wbcIPMc?^%^lkg-Js8rY0I#mB^*06P$nmnIbJx<63(Bww+tIPbX2%P
zy|&&w?jTJ@vC#xwM|!<@F`;7w%?IFN5vh3+(wt~{J%-!voscbm%{vA<z1Pr{MjH9l
z`>3bSDd!|Oryl-)=q!=2s7bfLzrg^^hu^Yp_;r!6h=fyO^t~WuN6-BZ*v^QyMJd=R
z1p=jjo?QE@LY5L35gL{Jwm`TdNFNZob+l)5E}E&dsS^Hp=uCH)B8Zt}(XPJ80r>oS
zOz^%Sa+4dWiN&2crhYv-Eb}j`BDnK_J7QBqhvq0u6nfKt{@Ww2wU#b#=ZGg^gvc3$
zVLxdE(})|v?fph}oG5$L$nveV5p9lx>l*ixZav)Eylic61|#Ng-s2{7ul9fLygNL8
z|MKJ*Dr|0TZKbs2yxJW`FVA;;xc~0ii_VXS|3W%%=Hg|hCS6mblM6SqZJI2`PQ5U?
zmH{i$!_SL<{vx52#4F}F1x0kj*$OIS-kvi?hliKartL4)zzq{DT6GOF9(^ZV?oQ06
zP0U7pAI>q9E%Qx)O^=a`yk7(v#d#61-0}iLUrJUOQcey$+>*~ktnrD;eKkZA=JLZW
z@2+ft_mrh9V2nsZW_G~6rhPet^cx2lF?}~|V?(xowbI7gnT{3-D;j+q9&X(g<fSVp
zW^1VkG4rR6H8gxic>7|Ol$i*5`1$j!E2&SGSy;*?17qP8GOolAFq4jUve&#GvA-&(
zcuyPnR-7h&vE`FCja)1AVhYo)7Uk=h?|K5>!P4|G#1vc)7+sf%GpD%woT<3yHW{a!
zJS)_Hj#^yzTB2ODGO?z{(WDH+W0pCO=fUX6Rj^o|P;ApsM4<bGtQ9>cCN8Hf);#(B
zbZ26TcEk<V3#Nt^_$q)Jby=i>AY*l^E<;}l2N3LVQ5&O!GrWqr$c^Lk(P!_FtQ0QK
zWo?tM-n={Pyxsr)@VImI`rzo<{>htn%<#K^-zrTfXj@YfpyJa>srY_JO{<?14SAd1
z`DJR;`&0G=28UvaUma9BBXWcoDBF;M<TFN!B?L{-96KRrF(vhw2EbxM!#shNG@Hlq
zD5;Zx59cCVpQh~y3w=_u9;G?SI&N>?9k-yjH<j#yUxuSrIPq85KR9)Fn_F2n^Vtx8
zTAWrCdBu3{pv9=8r<Q(z5r!=TPlr{&1PUw_8LuTu=-teY>3B_1M@0g5H6A5~_r*M(
zm3E&79K2Rw*5<l{hnXNPYQ~Vck~<9(+4E!AeM<9qbYBzGN;<HGmDJ85`U1q0<`!~8
zLW98l#I|g4E%sbDcp1vsfLZ-)(H#qa`LAroRG7!;*sZyO^?x*v5#N@un5~Khi|Xbv
zjBm64(m>MKgT~$zdCOqdVqL!(!!c;{JZWuhc3T_$HP{VI+WViwaBh>sB%H<E(-_RX
zQ~<>k+?ErvZ>+Gk#uiSLs7xl15L{_qhCU<ASp<m*XK3=rle-Np%0|C>_abk9w`I#m
z;$u2dO3GbP0vU2)2KX_e9y8xC;-fRijlUR7hot43F7v{4-6t;LFz(3Yi+MdEI)YeZ
z8L`Ijngpa>ct}4<429pdrdM(FEbA&;rX$8^sx{v7MZHc?I-$}Kj3LP~!-L1=J@=U6
z$J9~fC2L<%bd1a-Tmoi8omWtQ-U1{w`vTa&$IA$g2jo7B&HFgM>By#j1hEC5i3cA^
z;Lf!xXzzIp#A>s0m%dIv1|KnRrInfOBOEKT8<JgJ8H#`V^5fyNkMG}C_Rt-Y-GS6P
zXMZ%es-LoRW?=?@ELK<Hswz^XKRJB$_T~P`;VSAcIS!4DWKUOkZA7+zI!xDpZ6?8h
zpHo^0&FDNHJj_;x$t{r0^8co##4PgUPMs{GIUdQ&y;0&YE_@gE!ss%@TLEk@>Y>@{
zOguDzKu#b^phZTi_@hHZ(<PTP#}R{?Y#1>eAK5SmUN7j2Xt<{#<`h9QGsw`-#9!iW
zO6-t|T7GUOc&z=mM@o}_CeF3#Xpa^$*|wq@`E!Tq-<pn-`I)VO!{Ky6nz4Q|W<7`@
z8c9{2M4Oh5N3c(nB}p}ppk112jl`0he4|<u7L;<|VO=dMvZQ&Zp3SXIN{bxDOa=-}
z34{hikpuN!QU^sRuaxG?-gJFrDKf0|$_lLS(PYjW2P51f!@g>NI?Xt-KLTBrTp)X0
zTq!C25)njzH-rg#>i^S*7)7#Cp14WTEpn&nzvI1@rvN@>$+CG2Of#B}=teR%!<I=E
z_~C`!g9YG~iK<ioX8zA<oJ0(tlr^C0@pt?Jn^1seJzIsQe~|f6CRYnjc@Ly9KJ;D^
zl`(k~i%RP;N2K(B%_tr9=BaMUlD3Qi77No<1}biGdD`TvOC%1BU&20Dujy7iO8#_e
z|7ZzYialg9LbI1v>zz9FdRjFWOE70@C3P+KY#2-?;plV3#~v#pTYCH@71o&c<959p
zepEss-=pC{Z4&n=#w|w{tu%NQjj|F?IyF?IWJ@3povJ8*Wg`*SZ#ZtPey}+h#aAut
zrcD$X;dF`Pk0;ObQS1FN*KOr0Fk#Wu7SVMLnPqJp&%$)_Ld11~UxVvCW~kI5c8`TI
zu+pT})F=fKfy*P2oMzD&Q4)c~q}^wPEX0#(ItngI@X9*K1N93)OKB7JlTI}0@I6Y?
z5M=Abm<o)4>9*T@mo0lZH~0KhHGG+ydpzfx8VQcWsd_06Z5f6k0(C%TSWn2}J-Vul
z_2<b|f<Pk{mzplNC}py}y!*_i!SOO>BvqBVn>9*XBJ61S3u<8UB_dNGvOyE!pUE6m
zaI++1((BH1=~ZJ&OtKH_WUkOJYel(7E12BCQKFlF;Em>FZVX=HEfN{-4`j3&8Zyc8
zB_6^wRr=J**2&^pN-v@;DcBr(d26Om0@q})IN^p=nCRo07AL;Qv{+|)SOld+<2(jb
zXs$@%icFJSqE*2;BnikJbHPMe)oQD`iu$OlW;Em}^5m<6h8tYe@DQ}^IU=EX8xa?C
ze49Iem{+d4;145CY^dnPY5rv?3Qk<Nx1xN8bgUqq@k)%jo)X61C*kaKBr%LonJI?(
z5})0a#Z2*wsot=lM&;wp9KY;T%BRu-m-cBtSM18u`w)c<A(nK$$8cCQGdsse|2|a8
z8~$eXf*Z`p%u*`#O{G$wSO4MY;P8!g53NamCuo~{+J3rumMuhKwQI)V72!m`CRsty
zl#&Eo`;*uu#bzXCAqI}cGjrX0$1YNn==Ts&vb92(5m%HjTMcwr<6)6UWRU%Ei!+7_
z8hKu!KW<3CtmdY6hcV;s4g0%*?V9XX5z!X@o5UK8HS}B^hrxtE*U#g*RtlQCG7IQ`
zaeXB#uB93ynsyzUcG_Nfm$OWJ`lt3#n(>C6mRc9UF~x9I-Q`vF>Q~V$$!e=|My#^4
zy5d=3H<D6igT|5H-D&h0#Nws9t3|d3sEEjhpVV2!c$;-#;S11Y63zn()v?YY#Ud`_
zppw?u6cYmOc>BU^9ns&%(eG^csF7=bG+S_NDli4JmAQn@i>0a;g>|nVhfD{JG78!<
zt68fC7$KIeGI)Rk*yI*t?r%UEaZ#)AAR;e>8e6<p5qv45a6+{}14JMQCJWc_wVOlm
z63!8U->Ev%J5MT$tXa48dJaEs>#^0O73l#WZQ?q)lFNFUr4=QYv|dRlJq2%n&;*(9
zM$z=-!@6=z7jh%)=;z2WSOx)(RzBj_`K1tHC=5;G?mzk7uL~W`?bHU(N%_BTgD1@=
z9w#-yYh35u(U{H}$uPd^NZ00Y8YOW*qzl$P8aED3jE*a<t<5lrT20?9e#vo8?P}&m
zg3l?xri^zO58&QaBT8xn#ia~?uFM0u-ca>@!|s<Nk`J5p-aa0?F(O>@8KO^f`|lw7
zG?$A$Olna*d(SH|wQ9*Ssay5R>()gQ<~h*MKK6fEF+=J_?Wy`@WsJonOnSjId~x#X
zr6q9uuf*|ZOB}Z;Ye>^N!L-*CE|#iA7}vWUtw<5s;vl43NyO+()`^pU*Jt@D(f|ry
zMPSU8T%qTd@$Tw%`1cNh>W3BP-<>qdb<~ioATnUKrJnv6AFWGx8oaB+$vO1c&||e*
z`gbt}>^3*TNrO>|*Xz7uXAm}uqaaKp$#KhnJ_{94nvZU!bLiwLjAx>S%^g50#krBm
zu`mN3g*`bB=HV5^Gr&oI;Cnls-}H^>h%1C~b2<UfV3Co}PZlBC&ChD^?!3GH^C8~#
zvCniu(k2bF$l~Ts=tUlkSQ+l%iOPW<m#W7uJ2|>6%vqYBL*T)AzVT&UHs)5AF3)po
z6V+V)&oS^(OkV9l0@IVbz_Xq=GPx*+euyXMyXE~kYU~mHj__4~`D5RVbDt61UbaEl
zc2&rnEZq?K-f65uG2!sKcnk}XO+_Si(JlM!<Q}4yA#|C!!(%w<XzIXG%9tc<4mc{i
z)o^ms0}fd+ie?$$_P7oipL5?=Qi5D3d{KmS!gtSRn#5)ZJIZN#lG{$?U6w__02{MG
z1PfltHJdY@CD^Hdny0pC-m)CZaogdHy^_1O>XvM8ZF5+%Y15pyp9j6L8^;&xXcI)<
ztbIOhmcqCC0naG6d6JNr&B({;FrLSic6~aWw)c2Y+shVOtREfhw)^)g^h{ilynOTh
z(QDf<b`+n3a42e_O4uVPb8ozfK5#9H!G3>#WBWP$e{3Xwh6lrV9AfHCUd`*nlXuTL
zulAq4IC_ox3tyy9-(09SQbOVT9noL|Q#zfyAtgv4Y3FVI-tT36e$IlE`xRRliYC!K
z3Px}?wyTp@mg+V!Ljl6@Z4JdnIA$>tjktbOm_}FetZzS86?7P!YwFY~fnIgyu&;Un
zVKAq&7^g*lUny3QgUdQ$7S<=QtM<rwS#M>kdJf&pi}9;`L&yXI)*GBE>{JzGTG(m5
z;m9Q3YMChhS~juNxD5DPr*SEFXQ6zoYs!^6#yG`Yw1@Fl6RsCC=5%_aLU>m!E3<=K
z9Yqc>j(B8N*HAJ?q_br}lB`v)@!z*7jIx~$qkx%zEhP5DcDAK$74qTH8!ieuE6H{{
zUk*Zyax<<}HXMbV&Z)`P3Pi*<+0}*C>>ckVF%tQJI8cX#!ju~ul?ywGtiy)<hT((P
zVic{nR5CijL5=s0xGkUftbY37z}<qmFFT2-b`?E)FkWnSIx9u#{MS2g@S#eH;fQ&|
zqGC{gJtc;LgKtoNG3w6YO0pffGj1UzKem(Xc-=s;(1sjC<exbIk--uL9y+9P|Nip-
zx!)K^lS<uVc+ar$kXjBn5$3xJqIuov&Se6l|7pjsb-Ja%Qq&h!F?GXVcMT!3X-HSP
zC*IM4ZyNgOLKEJc86uM=l#$?MU!o}m^@@;xh%)1MhtJ=<BXR1F<Z6KUns0IUE7Kkb
z+enoKh=n6W<=an{Z`&GM*?)d=NM^xxSV4yY^zvXa;pl^isRiR)@kFT~_!Iex%9cNP
z@v88#c^D1rOp#k=^r9b$PbUvl@u;^%QpJ6m#>G4?G_=ox2|;WT%ost7sDd~^3xW85
zYUXicU9l|EGC86%3$9X}%mj18LQh%L9s{vM3T=iVo+#9wYn(J8=%|Oj5ua2?m{Vz(
zv@`5?(BmQ1Z!(WFlCxD5hM=Wl&I%TjKMVw1$h6_G*8Ehf7c-%HizWg@j*(#|&^uO-
zHm>K8&pG{8UCY4L{HMch0f?}OUJ#srF@e67cFU)~JUzPyyUlPr2W~BOZWgih9LMGi
zEHiOl&BhjEXXq@TGj^7B#Uzm(xnp&GPH0a@L{D83aYfu<D))7i-~29M<+HH7PA`}1
z_ZpRt<mZNegK0#8*6vg)sPGoo>QG?k@aGd&X#H-xUOwf}oidH%n%#jw#=-!92wu%M
zGUmkcIUy-h(Supc38{JNhjWTnfIA7R?eDR*8v3iylRmf+%(5=+m$UQx4Q_f;>yH{y
zl7M{Jc=GVE%!)usb_-_WCLw6o<y??sk7^IG)^<<Z{q_}Fho+k;OU^#d8rK%m+yE!r
zd55)A6e7e0QL)DMpp>b5Kfxb=^d$w(j4!x$5Dm~0yz40YtZF-!_F6GySw`toSxjuV
za<k>I$Ux0D(iZ6htw1xkx{BiYS;pUzGL>5&%!2bVh`*InACuDVS)WbOnVy6jJN9Ju
z&<2g5rr$xC?Nty~=AvVBT^~Mw&*vA9=1}!|!vIk={Y@n}rO)p&)t*X!g#v{@DS?0Q
z@kAh1B^S|z`JrEiGt^<Wwl^nn=V}&A(a2@hSt2T`*f|-LJ8h9=AC0MM`{(EfiSgG?
z@ZP@B+^RpSH}!x(Zm8=Fk0-u$8Va%0c@!uct~X!Vsv63m4J1E3<8c;F5RL{~;#Z6X
z$HaM9aA;^_GUe16PQdnm=HHmghS5F`o#MI(5JjaAib9~FzfxKK{^WTIN06o#6%YTa
zfH8H}O85CVW1Hh!j-ACR>WB{$c^uM=Ou(d?lsu!}=iLKXsl<IiPC_AO<&nfhDfl;=
zWER7K5k5?%QlipdeuH}W?%kVrKxP`rQQ#U5iU-Wr^qRCw(w+Q&`C<ed$PS?7RIn0L
z!MWP$-pt8-gO*}OWEPp4AEdOLe<$cnY~3oIU9cPZsZl#&%06-`r2V&X!o1aGcDK4%
zE0~|IDCa5~XS)c8^PO6+cq)fK^Jom5OniavKoc!-#Kz~qkjGQ>_V+i&wT*sl<Hp-~
zv9s}NV@lk5HlXx>{>HyH#v6SHYts+Ctkf|`8hFSi9+Yn1!YwiLQ$$r2=<M%Bw)iYQ
zZ0abCiZULZNd`qpe>O&}aTGylJCS#jnGAlBVKpm>TF$6hzaX?J%C2X*EIZ>Ms!}P&
z%?B|`2=9np{S;#4+OH-2oa+G?u*I5uT?X&8rpFj27D)zwSq1ASXG<aR9;a4Dwtfi5
zy=cnM=Dd@%XIc4-2%;=oIPQl1yt+_<-3dri+p#6Jbe2y}hN@<2^~dmL$=%LUzR1*c
ziZSvNvfzs#grqp4Tk-JfhR_q&xrQI#Fdc0_%(Lj%E$*(au2A*BGqxco^OIok4`{_-
zKK*46&G}t_;X!#eu2Q(;y`eZ)!N_|?5gW*=#Bn@EL3N6@pm6eA;AK3cnwQoA>d$#>
z72J?VAvs)A&;+y^db@ygP0u@EIRD)n?_?GMGo5(Pqojv+qtGqeTh05mt%tSdH@ukT
zhxF-UG-^C~u&q=OomuZeZ+0`~`UjkuRqrCa(GIGA*PaeA-i`4$p)SvY$$41be)vce
zH^@5{cPu-FIoq&|GP-3!ai`Ilg64^5{W4U48o_iz>sBP?tWtqrP>N9m%UPwr*Jn^3
z`%>jEGRVI%exbP*9M*h;d^(*$3)0)Iq`K)M2#YS?@HSnaWzj51yb(0t$VM1xk+yqq
zTpG%Mpq$t@NL<DHg))?R)9>+Oc#UTjd+|Y0n%1c^?(=|rItC+8It-E_h+cI{Q-KrL
zbw4`C!)2w;FCX(2Sbw(uDk>J_#Q{bkQ+)p;4xCn5*Z=9~>EzX6^YGu_4`09gemFfj
z`Qi1et>(`k#=|%74&QJ6H0r;8{b35i=MQgxf{W?gH*%l+)O`Nx{loc(lkZ-?+j=<t
z>Ec}wY(4z>-Ot0<AD;gZ|McPAhkkQ9KmK_bd^r4Y-^x9iCf)t5A5T8KyXd}e&W}&t
zz3%_M^^HnpXy*{!l`&*4q{?^vcgNJ1;Jue(p_~z}d()*=;k@xmSvvYGFqD_fdabs9
z&pe{*XTW~zz<t~PlE6hdA*_o;<x<nZVeE(cnEGIJ>Hou#e)(kt5A-5#9-OgWv@UgC
zK&bkdaOkiG^s%?F*Z%2i!Yr1i%bG6b*J)aYTpq23QZ1H7Y+=ck9r~9W08*+VF<U61
zW(m0PapcT2p9~{R1p5L8c0C^DOwLPxVEi)HTI&z$=ufv5Oe<}k%H#^eSB~F7{-J3j
zh-h<{O2S^Ytty%>!yewPXnsM#XjEpaZHKSAwqHeXBq%A3jYNT8F;t@8VYT}%z!tjs
z3}_QGB~RxOQ#K>-Znm4c4AsrUSkNO|g)XjO`CFvNH+h`uH=sasfcH0&Js#hGg;BT6
z?1oik>Q-26z77-b5gEJn>f{3**|o}6HQWd;<A@x~pe&CSZJi*^mXy<r!_DpcH*k=P
zdJS~9<R=og#-KbjdhpW)?WG!cuz7TV9+@4&<tF2q)`QJp0dsvZY2Ad0SxQCUS+o@i
zG~lc-Eg9i~(2`_qIa<rLEl22oY+F8BV5f@fba9%3)SmRB2&Nl|HKS;7Qx@+vz4wKz
zn&c!?d;=l<(Sv&T(F3l_N<H}gN03$TIfgr37`>a%)IKEyET@7@{nG3sLaVcsFs~X8
zYy83x7{nGywzPvd(YVTzebkbv*xWO8{%Mxlizkzi3WDUc<=@?78@-@^KE+kD?H<+$
z(#Ojcr?_pJ&AxI|ORY{dWO*wTxNHx5@ZHI)w<lh`?y?L^TWl=!`0um%cp4&pc-5ot
zbNp^btj)3GkmCr6fXzdG?&fQxN{<KLR^Tuoe_%u5F_CP^*l6REtsu+T`4*|24@HV>
zxvcmwt8?SB#M_nB{3U0947Ze((<Le_9$i@4CNjLxJgQW}tkD2lK%~FMcfbw${3>hk
zV4T|Za2%`XD>sO)<@}<2oD<qJlBGa4j#=6Ex5KwTR!o(Z>dH18B@*h~6h+dzeYzrl
zd5LM(s9HqD+|wSIj8VHF!;XU0R3G18Jn>WGom>sra&V3~y#!8GC$-%1e^1ZbWhp9|
z4mT`mYUE|{%g3H2O%b^4lu>#*Y?k+SxYd(?K8m}+=tbYJ?CqAByV$U~TPkkwwA@k1
z9*(?dj&|C#xzoJYNvF?BtQ{xRXSr8VW;*RyVOPGy%}9M*#|+Z!c5sgB>CHu8<xn$O
z%c34SQ|s$RD)RM*4yvTSf7z^q%|54l)U$XT_HUk{8+A|+!`CzmI~gKgS2Viuj)(CT
zIj=m6M_rH{J#JVfb+&hygE{)vcSA4ec4y(G9vs%&nGeH0mW2o*Xo9`dV@2uN2^E@Q
zhOlfq{4Mh#X9;?vXv*JunqaJ#uh4TE<9Jd7ZjXFmR%>q6wzrzwe~n;rQ=>?ewetnM
z4sLigu1`EOs^L^msBKgaR9xdvYfQALvCX^jV7vL~(c{J>zGT&Jf5WOD$3RpvM4zb{
z$Q`vwc$M5T1@LpkBsC;sdu!`SV-P0u=rS19;z4Z`oe$?p<MB5Sw;xc=sgNGOly}g6
z3wjf)8i%{{e)3W8f8&q6*+*XKM}s8si&2sV0sgV9TNHAoenwjSaAK|)Ee0KW<&`DT
zLA)}8NYun6Yhbbb$8P;@B|8oVqX6gLZcZ9X=NJwS{{2xKmP$hucyzEUt}ZBoFMBaz
zS|{8Jas!W0U-BCe$cL<Rmj?pI-!N>Vph~5ZRn$M#<x6l0e?5yLUWc&-hER3?;no)J
z#yFMmZ}Tz#%YQ$9NPix{FRrQSNaYvPWSPOISb-^XSyq1fF;(Yy>`sluq@O;p(|*$F
z>__8VI-A>1wyqy<Z@Gy5=F!&m!v|X~f*(J6a{b``wu|UT54NuFKiYB-{;;`yz5V2g
zgZTST9$W)ce~}HKKY#N;B~##mb1~Vt;Vl29(M+kz{=S%<BFT<#YRj(iG$`JgQZ_4L
zBWcff+H?GKx{;iL$Xvb_5Q+AbB~=x?rAbpUQ>h6fQ|;~T`Gk+78h0y3!GB_$6%#|q
zm+ap@vSg>j{xAE$c90+FjM^>&KZ|ro$<#h>GHmnsf8kb7I!aA%nt8>pKPdQt#fT!3
zeb15E$+!nB@82uly{O4Gy49hcou2T+hhB!*32b96w}#hQ1mBdb68fXp;T1D!B~Ra+
z<D{09yo$84`B3;NErrNalt|TAYqwL;vP_@G^Q)@F5e*M~FUUqQQ~TLrHjK=}ELBiu
z%h~K!f91O^y+3aXud5sq{6(0Mhn&@*EqBUJ%bMe|(`kx3rgcW0?~J;TWSHlp5lt{2
zC$pD9LE+IKzltxT@U6JfF)m{EcAnB*g2x<W$C$F@U81Q@tMn0Xq1cF(DgwiX?3(K1
z`Q-!{#bwmv=u3p~b{ERy1bn28rUDWm;YgThe>eI(LDLGILh8Y-6t)eV=B(ozX-637
zneHNkp|%Ot2~7>mLYTO~R7~F@2BA;_Vj00`j!wFB1o;_+vvAT&VW)wdfxhlu(5Ek2
zY2G-PETBhkEMPP!G`sT&&T)J$p&YY&&j}Tz^`BXds|RzYpwfww&)suWOS8#1k=O3-
ze+lShO4e*UEtm2q4q_&n^plTh>X8|HD8gxPCeUwN2x|GgVQm0M);`;m!~RW==@m4j
zb)_Z=XN~bJNg8{8mQGh*!~Nj{C1l1OX~6`k@}I|}Ox{@*<VnuCKHwv1B&fWMn8{g%
zIBdH4<m1H|>N$zVtBpd`kMZ&sFYH1%e=vvj!%>JHG$;5St<extX>wi`{h{^HJRBbq
zq#u@{Rz$tgz*m-3j6OK87vn0+Wkzu2C0POpO|>J!yP)@a4d)B=t3u}<^pY^Lks$a=
z=H;abHTI6f2?KjSGwt@7rVxgS<ZzmZ2f*K>U?ezsnp`fkIO!F+511YFVnR`sf6*Ac
zulhzWY`W-TT2-87pwWvbgXo;Co3bf^+N*}CeEH&m=fIeQXoADf7CQU)`*3oozpMX>
zTR*O>%(+@@#qs|BWa$K019I1%Y~xysk3QxKWCaR;WDWmBn@x5<7hE~AIc6*83yz0r
zPDZR6_N{&-_Q9ULxa3>cac00Rf4s{wRXvyk9o<vE8_jXGq4~UesTEA#<7!UXB@?{@
zoRz9chdab=jM8Q8URgJxVQL$b{EXru^W}WcBWoZTcN3$uIBrT2ku{nu!W4ZO07}dr
z1hwA$*e>9WvKnA23{E5HHD#$v9pH$LW5dhf=Z^QBV$)w$y<Zn_D49mRe+!aIhSB*D
zPbqxR>?s2ER`HUyqu+B<#l+pLI?$#M3uPI-V0O-cg-K$9A23nFU~Hb=!{2!OQj5^)
zHf}M`d(5u-I7Drus2PTU!{3sWqF?p%hIxbPrR+o}!0-U4mLkvXR>8Z-OZCy9>Rmc>
zIACnYrlavMbi<D7Vvy!Yf3rD2yX~mId!9@=b4@@bgV$tEXV#q(`5c%Q)bY813h;E2
z5C9Qc3OMe)ZDe#1)Zk!&_qdmFeKeX}^$|v?aS=_o8lCC&q}99!e~z|V@1kkkuMM{!
zJ-`Pqw_32YKq#Jsef<4utA&4lY&D-ey8jJ+J#3Aq4;l|1Jlfu(e_CQPV{f?bW?9d3
zkZpms;q%V3%?rc9K<uM@Uk0JoCs$l?R>B7)PuMY7xr}ko3R9cu^un$tbrzp#u2|4g
zk&(RAR#D&cb4(URUt}69I-nltt@L8y>%dWk=B|=qIBB}37($L^8EJ725;Mh4VQmyD
zQamP-7lnt20aG|ke|-9mP~`Wr0VEukJt5CE4$gz$VXC(ka1Wj(!mEaH=fguZ=odEd
zXb+y9!oPd<yAA*eQN>Dt%YQur61MquDh`87u|>UgWJ4ttS6}h`DfLyh?58aCj#5Ts
z2hiWScGcr>9&j~@Hor?A4{aw7@pxD_`cboc(AP!P8^HO0f7D|5g4(HS%mwfzluT#w
z6wM%8rTBa&f_WIOM2cF)S%k2&@anWsZ>~fOtBG$(GSPBNcWp!{@!FM&V;f?2S4=TT
zKN7jDb=dn>neW`P)36Hm&f04A0`u&1WV(t?3<VW0a=zg+eYNvV&f#Vy4MT>G!7R`)
z2R#XU!%2iue@1Sqa1!<t?}y+rIHna-C7}mf?(PirL!P-yehCiTC1Xn2{?L4c3`b~H
zR2s@$^I=@ckKT?4Z=D%I$XXT^Ub9eRnNNLbgkzz6vAHGQA`CQlQPwn6`S?=yoY5tj
z_j&eFg8AoKM&+19W!pcwps938gt1K<yULJ5B%9AOe^62jSSI?E=|U<@l}00A1}_%l
zxL|B3Qwr&^{6oxdMsN7(&AT6uUVm@gO)Z7D;u4BpswSN@B(M?U8VRRUVJsu9EgbTQ
zUSJy4Y8*_WfdCNI%;9ep(Mn@XKfikEi8^(UvA+qZmB^NX7s&_c^DyC7!9X_?d@3e;
z87^(tf8){nlzX2&wb6R)JQ&6Cl+Q+An$v2|eknGM8~x7?pB)>rhtcpg2cmG{r@&NR
z4(Bm0s50bTl>^30HNR%SC}T@Q$o;@<AZbF4Ho*mir8gF&m!KzdYpq#f9BUz~7&!VY
zFG8~a%wkj$cR_-K=)Mtl8O*pom7PzjBfQ+8e{o)kAK);N!wm8<ZodByCsp?l!fV*?
z#j~)rU2ksH@B7ZhDH(8jX{Of`Pg`V7>BRg;uB@Qw%2J({*R`k(>P*=33asVz2WO3w
zqz<n{q~{0YMzg;4i2;VB)*cqcHwam@RLrNLXS5AvrP`gBVyQX4j?63iEj7)7_|A0Z
ze?A5&KtE&t>3O{tFQJY&6yuOo{EY^`s>bJ&a6W>uh3Adm^^N9qr?!1YkMXJKVEBC;
zo;PdHXTiB1*O4(B>S<P`^#<ir^7R}%80biOF0hfG32S7n7+=>+xs(^)D^nKac{MRm
z;Em`ZJ#=A@=lGlHo|Ypltc{pm+$GTtf2pWrn-%pfb4K48j4m-Xi{8GOt=W*(`V}Wx
zX4Icz4nH%AD?n~&$-Nr<_w$FNgB4JX?T(R~vrUTz)9maB&c|Tx!l@7Ffhn%74xxpm
zVv@#bP#f&mp3`(M`lGCV)z9wzIz6v^jv{Rh$vqdVd40K_5-f*`<|l54hmn5bf3^lI
z_voE@yl}J0W4xX1jp9Xr9Zs3Z+&CRXRVT3*^m<{!EXHUrt<u*8@CzOxEs5b|iDq5_
zzt{x9;zEVt9OFG}Hx$){eRUT~Oobzga62)NbpXpX4ld-93nYvxh%L-2$|Skbo5e}O
zU<B$Ym*7Q(q(YJ}9G1>!LAMLUe{^p|!FWI>(gK2yB~}w_kqkw1wKfvf3vmcrd52LA
z;Gv~X+gI&=jUifzujy?@WlL)mW?>SKF2gUWP<TgGkpFrAZ>U7jIIDDL=DfixUk#()
z5Uar}ah&Fc_#~2LVRxt!lW05@q2j!<%y`dBMvHTtNwjDzbXR;Q_Y}`?e|}bJs9+Tq
zRh7&yZB-#M4+Z8HIbaAmT#zKIs%6U9yv4LqMY$t$pvj=QDwkEKcpBDVfgJ_Y$X?}~
zk~-?1_i$t9qAN<Go9vu~5y!g^9(J2S(A*j{gWmQx51QXJw}Qumtv)<?5^g_HGAAyB
z4wjZJ-YD9tT4&nv$Z5wye_gdh0i|imWqnQwj7i6l9&#%~h;j}EV@`?Qjfv<-=*2#B
zgov6AEarkmeKkNVNWkZ%?5)$CEbo#I)yOn@)L7R`KbB1=<I7}3LAF-OiZ?Irs~1PH
z>HaoHSnC2c#<cmc9f4;$rQzv}r}sO}?I#`cRBD2?ri>J6mH<<Re;@r$FvoZuDd`~F
zEnH2qY<F-$Z?+i4k9Z0EIMV~n<%(9B=%TuVx1sFHcI;mG(08fG8-@tw(7~CG)3)ez
z9Ij;(VKOP0_Hzm3@xLO}UnD}|AQY6Nb|CHl%2M9{%2KP#QZ&o{WPxgVopL<?c2bpX
zVPu(X7m}wCFp9L|e+eN)VG47a6Y9ilCSIP3)MvM1LCq93xtR03MiDg|_V4T{e)iPK
zkOB3!k#wh8$Amq*b(+*k3Aos4r1|URNkzpfGNn`zSwf=d7>A^u)6MD2A+2u;$bR@K
zDF3BLNLqH@1^}pAs$;2?WXGq&GSN=UCh>c0NtG96h^qOZf1x}_CanB236Rp5QXnrf
z2j~5h&N4}S=4fm}LPXK;f7X!<ajyL)?jJC9QOSlfGNFbHVcEa`{F_`Zta*3PGOc3W
z%h47+vTqJj_<6I^uL%w44Ek@c<?Z?9(_j3vyA?Eh`;suRmOUd2d#=URa$abp;3*AJ
zN#acsDyCnlf7lUPYy|^n;sUsk9@gdjZ1e`#z^NN7N+^-H-aUwQ@4&4~g_A*v?m|l6
zu5|^Xtze4;?M?oL%yNU|`rS$i10E)KVLj#qpe$mStWd#td@2PbnGvN*?OYO+#A{jQ
z=oo>2_X`u*$|@UY-eg#E!RRWuNy>W_Z#;ca^|}i<e+q;lY`Czt;Pg2dU0OBfx>&f(
z8ueLI+;Z_%biMX13*q_l{JB<(&>648D{uep5o>uQ8h;+geH|N2FRjrJFGDmynI(;W
z+*_bQ22s&iY3C6|u5>9OGC4wb3XYFLkwE)lQU07~X)^Z}neva&x@d=```Wn~F_O`#
zjn{~;e-8-%b1<G)g`Z!`@1iNBNe8;`k+UeNGW=lCk7D6`=p*E|a23Teb^54Pi!`im
z%HD<7-r=^%g0@p2&9WgIh_qr8LSQ`c2vIYLe*Gbud}GWJt%~%iMwZ}>gX^-MwgNLR
zVNQhHC(@Kj(MrTc_9-t!18-(_Fc`&$nR-FVe^;S53@(vK)h<{p^+HwH%TlCMW8TBa
z@A{~VB=$sOxK~Z617(U!Qckep#?;1`G9W@8Q_|6$&TwJ*)?q34M6*%Gt3Nlgnl^du
z-u|K(UNF2GidX$21jb)UpV_-wBAOEE7C484btFzGJ@aYz%(fe|=tjse4K~axGu}aa
zf3vm;;|A`)*;`=&6s33_a{WDw6i|p_{AiB3U;80LWOHaAH&qljTGslq%F;b%MmSVa
za5WuasfHjdhGsEUfJPz6mvAOt0FAJx3w^n3=kT*O_h1?|nhpBh3%H;T;$!{SI3ku}
zNR;<?aD*u+3T=!UC9s}n5eNz%y`icJfBFYl-{c};kT#<*(bj*l@U@~1Id+~#s8d5M
zNN2{6VE;?)ZAIAGuh`U9b~|%iT4rE-hX-pjyti_qP#zd?@5(#JGr~&p=gHN;x^Gm-
zpZ%!ELlLtqrubuCG_HwRAPP`YND3|Q^N=H4s1agw%GI+*1HDz6$fdVcNL}gue~G2&
zFmSLX`x$;TNry<5GAqwmHA+nL>pM*1A!#-BQsXI9xF#G)u_%=Z0Ut!Z%!o~L{BWzl
zP#`FB&i_lTx)jEh*{@tgbz*kXsJPeCb2%y$;YP?5wi0)Frz%n$(au-tF=M=Sl#F7x
z@qAygw+-hpUC_2Hu{yPjuZ40He>ll&Ji&R<&(8CbPE*)eLWBfRD4R=&FF?8@_FwPQ
zK}S!KQhDW=UTXBq$BYZ+Y6`S$ISOQ#KV<%JY1Rkx02K!PH?WgEBjscqkJr&Ih=TmY
z!?HXo>|I>g_8qs<o*)3Du~TXFu)jcGM?}KkZR~w4H(mwN<as=TxrOG$e=p*3_%=8X
zKR$~Wuw&6zve?IOl{UsDDxdD1_2_Mjkgn<L?&&X^XiUFJx|o)9vE<Jyd*#jg-O3*P
z&*fMtsEd1lH;uTB1xuabS7?%_+!_4}o*MQgtR+)i8Fa7$EMG`bs*#duq8(0?HUw)v
zfD5k>WXq0p5V?hba_K~$e+u2sDQl)QG>_?AnLWStDb1S4?lM(Vg@L?zGb#NyMP<A@
zNbttUYs*yypXsKFpRX+X`l?Q4MsKX^v6<Pww&$eny_ts<sPYR>CN<7ZFVN?1w?}P|
zvRof?dN$AfF-@uPtkUSN&k(7JtQXmayltuxOf|P8c$C4Z7)Rnuf5Qk_il5@_s1TcR
zT^3_p@EuP<Is6&Shf!1DDl^NtGVcTOY}m3OM5q_3i?Hk!Y*}GZvGN=%GF7aNDQGR0
zY#5ngQ$<UK?|xP5p{fuXG3?eLSd8W^ok=@tw7sOW>(x>!0`!NDp3*5Rc5jCtrVeR=
z$O-c(M9a{!4n+JkfAEI0aL_81rLcpdPJ0hQbnaD3W|!C@?WolZhXf6rQY5Dr1%Bkz
zmuSlw^MW55R@?Z<=4(O-TH%Y6S1-$#MESB%_2PpPDCJ<`!=dwRE~DY$+8Gff^+s7N
zv7SkDNDj15sZUvNoY8??b%;Z;&g#9#r<D*7c_G9hU0MMNf0`y9)3FxAy~h;U)Z%+f
zxS216bI%a-UA*fr;crgWoPu6<i&)TtIXg?NHq*qBAsw^SxrJjjobxVG12TI4Rzi}^
zl9yo`;m2mvn%BsBEKD8UQans_{@;>=^;YI!J&S|=XRt6~x^VJg9lZi%TjD|g_UUvM
z&*NRUJ;D6ae?WR<=4>uVC1K)*l^Dha^UJ@<DPJqo4tGL5WMfF1cRt+eTBaxtbhN&U
zemptvaO0$6W>YX2oy)lm8wlK@kz*`HHu|JC%r})P;8Jgq5~R>1mynXOFGD!p4vUUZ
zxLV4vV#Oj`W*6|%*nm5Y>0l+kuD>PiVx5cDZ@H8}f0&>Gp(4k;j#q*XC%Hp$9!^|0
zlJiSi$~@*n1Wb8>(Ub&mJ;$ntR+0;W>{|3Ai25zIwaHfE(@XwX&UJi<yArRCOw2bC
z9rnFdsG7q?&9;kPW$GP}3tPhu<VDmse}fxJOK&4uce^lKyub7@;n?_zLph^^@9TG?
z62+Yve+i~>+<#jDd!FCa)78+vAgVNBOH%{fsdlQ&3L_2g&vx#*rdetDuG=BVj77k#
zt})YEyP&J6KOg!8t#j>8F$^(yCri?CCy~Jh(h6U`ip>}VG5~=SyXeR8rYJg>763|g
z@E{6D{Xfe#bsEmFpTpS~G*HdDtg_L*mqK*>e=A;GEHWf8L#P6&ieJr;ECuq@4QAzH
zR0k158&SW5G8D!V-LL&SsQu1Zf4hzxSQnR&ktI=|q##ZFfc0|{(Y}wyLv#3A$A8Kd
z@2=O}+S<~VXfWx=<0eib`c}t3m<f|ro8Nr%#7?`Nk@g$-q+OLu8-4s=$=fJ%qN=<~
zfBtv7Vl-z?me3vOt#NeS;mn1}fk_zZR;BPh?3>~`cb09DrC+%ebbC<h`7rwRVl<w_
z)8A&vd~tboee?VNch3$EpMU@2=!YL)zIy%U?f<+xK6(G)r=S1z??!v4-l;XW?>~6>
z=<$<pwo1Co0@XKw$%mq8sBJznU<I@Ze*`j&-N9(7q^uX944ed10F1_Vvk8m-{?^w0
zrdI(@{OIAs`;V;CSU<d`JTM`Lb*LVeCgpCh)KH36p=T-Gv6qCK5leE4Lq?@mtdj+M
z58K!{M&H<+)OYys`rJl|s!2K8&{Nm&@)^sFg^&7~W6K1k%vqsV8TE0KVdaSnf9kuI
zTgH(!U+7Rcq;p+ANm(`YhK{p8l_L6+$fOSH4fWo!I)`QYoA~{TY#Ha1qXV1yTax4@
zV`sV~3FC*7>`Pdb4N|Bc9UPlLr?No2k>^HGnW4g1mFt0$iG+oKuLUudhQ>V2Nc3q;
z>7yB^)ZV05e#?~_-`Lun&$7Luf9e)f?1iwe%(#dty6PvNMO-<s3P9{(7ES4hL}|;Z
ze5)y*%^&u+$ye7VDKFd%O(@8hO5%>OXJ$m4u5?C{^%m)ubQh+z<SL#yo1)4`<n7Gz
zQ!m{VuR^UgHE5tRrjj|7r>qupBI58O8%$393!lYWN)!aUH8an(Qc~vsf93a+2rXa^
z$PR`dXEa23DQi|%Jgo?mUy?1kQ-;oV$cVLrZ(I8AAL&~LlZ#A4-^j72MEx>*#Wx}C
zDAJ2Zi}A#JQvC*@2zUX&U1ni|r)+||2+#2}<V_aiZa7m}S>n^jJKvlk2K-{wN57`t
z5XA$9>&%=;VQa18sF;ELe{h(f-%Jg9IG%Q7L;Q_oV{7|o4i>;`+}e3^RuzTvXU(l0
zYonK3)#Tj+C+~hfx_k?gM+5q^v5|DJPIM-Oso7u^z|*~p7N;Y@p5*mw^im5Qs_wjN
z6{rl4U{E0eXh=5B%Xc{&v$KWXyf1RWLW!21GALEXN#j#$O|uD|e;?~drBev1uYdk|
zE%?;Q_4Nwasnp1J9dkKQQyXPBXey*;Pd^+T9KOM_JMRvuc!k}0cdxP;jHln~2~Unr
zULK01E}%_lbp6?zS4N%}?_Yf<@}Ny;H##R{K%<WT02!=ls7uhJO=yU@`t9>I$GURC
zp{%4w`j8fZUUe3pf9E1C)2*yY*60&6skq9x5az001D|)F0f?GsNc=TB_<lh|RHaz?
zK90{v;oH$-EtKdf<g1@k5y-acS?o@5J7RX2sm*h7Puaa4cC}TxyEly!M4xJ3xqM}{
z`c`5#$`L-|5MXYwgTKtQ6<f_0t;UPCR$*2wh3@;#<y+MWf5-E1>NTt0JK<4-)k;35
ziH3v44CJqwj?Pt_?TNNzgUPT9U!J~b=(Q}^>&29?{`z|GQ4<6((<X}6UaN$9vt$lC
zScz?ePa8qbWfUu$>a$snHbpcF?BMdx;>jgmWY4ME8KLXMULX!F$s(Eyk5B1~XB1(!
z4*!r2QhtF#f2X97)G-6S(#F4P8{^tWzf{R7P5XuA45%D`<6j%&jlLUNLU2Q?^T|sv
z`&Mukj@qqKu$T|ySqVD5c+Ugh&EoV~U>jOs*$9rGXraKmcTH?^d865GwMz$z(YEEi
zCcdB+owg@WOJ`YaN?H4nuSO-cX1A;GHySTSI7q!wf7H96SpyBCP<&oDzQ#E;3Ko;z
z(6V%AtumIZxe7`w;$$0`bDA|zhw(g48U#I3t(K^|#;?=xyi_ew$)$E?>x6(pb(>Pk
zQYNCP&SoR3^gP^mX37&!wB#}^>{rgj11}hj;K=)XnCLF;AWY4}8L`%O|6UdUE5kpP
zD(gb*f5BPUk7gnAe$kIyw#V%eJJEM+JL2Asm+uDDK`k_9aYd|N5mJX*X-zW*#>jB*
z76!u?wonPFmlzFt)@Q>whT3s9j^Zo4LzLj0vpJk5>tS8qE;@?W1y-ucKy|s}y_t+|
z5R*bcAriKxRuBuY%b^nI3q(8|2I%SLEfREqf4UjRGkc9MRQw(MZq;~#m|Gq*hU$Cm
z#n#sLBQKfXV9DWiwwBXbw(w&rEtoo|!^$nCF0%LOLUJ)zF7Iz|fICjS`<az=Z{}g5
zBzfQ`9MMQQPq9jCtWq5ELSnIARfceE60_WyE>}t}hHkq|VAe=d;5dfR%Vgfy-|ZK<
ze@b)$y`X^Du7BhPT9a-dC0O__PV(i(=`VX{u-f5qgS08^Zjp*;@1b*+!&VCl_?w)d
zSCC3?<&eA(DzxFe#)E_-f+Eh}kOg|f&hJL&wmD1(o+C!!*7f7<tvV@>m8^*o2*XL(
z?`W+MUk%5PhUxfGR6sDo)TScFvdy5Ve_W6zwjv{s`Y@ld6X3)MIZd{Hgf|KkpP1(?
z6kHN5JP{aa#^GG%$Sbv6V$CiSprNJRT?`Gr^R&_3^|%Pvk1o;tRW(Lm>sg}PXTy-e
z%Ho~-@e-Bf7176D!=$EAROD>4i}XmWq&o<+!;5AICB36KstE_qf9D?kGoZS}e>W#m
z*AEMXq+ZYF^;gPUpI<Uq?W+iQ6Z{f#6l||=4&Lm*whm(J4BCK@9Nsv%K@~pTETdIL
z?18X5k8m^RSe~@&UxYWj5MSo)AOLj4K*AG(Ey2KHAim)WPO3zOmKvY>ybw6_K@tx(
z`z9reCJ48ggizbFV1ke{a}ot)e`#nQhPQJ>)1FJ!0nIG$T~~h5jZl<9FMMMi@{#pY
zr42pPkYfR_jgcgm-(fJBgd?=|rKs*7trR;#c~WU~P19g{n3ulS;1x_k>Qp&o2KB8$
z8`mA$xOCx}b-)krk57(Xci!%Qf7p3>^y=sYI-<F?mD=Fs%*cT{V~Di0f7|hvS3#T{
zBw-4PiXhKWU>;BBh7n!>g8KvnZ+7kgS(MM9l>)vh4@@!3No};z$Er5)#>D7X>RXFo
z@fVSOiJztVNn8Wp@fffcx-?8ke2$=5Fn0-!0e;@4efTbWtV9GByH4veb2C*Pw%RVd
zLi7jMflPq}uT)Sb9<&p5e;DNdX*?l!NzJCfb#6j)!?H+T@&1cl)td{AVM@=1#Ue|~
ztN`d0Zmf&R#RLdrX)S}r4EgB?$qM)19tjCb)uoZi_8Z%HW)w~NnT!a^K5byiQ(s|S
zS{OCFCuENFg?U<e2nz;r3z)GFywENTFvfRtCk?2@p&^;ffY7mve{R{Ykx)|QBofE%
zpi;>{JKbq+SvRLzyTty+F5@9H8xDk2?vMqxddAlsK6XHJY0@2|!H5xKrV^r^Dq9F*
zZX|0lcG*bN_{qC!kZE$o2AQhRttcx`9l_E)#r+9VUG2E^H45u!EzzFk<`NQ%njIO*
znP`-GeiNTG;xt(Vf65ik9Era&d$4HPNh+8|9dwcR%xDAJmMKyqgTu+JVzg^SNw;w!
zHP~)5BKOH`5jxmO$|6lG$Vd*vK}tzA;ym!A4V2fT%Mb%CFbFj*Jcm+xq<rLs7-Cb*
zUpPq<IZVrkV!NFx*RtvK4BTt~?b~*pH;K_u68G|m#hg~vfAP^EC#J&gjUaAf22=!k
zyfV^43M%d1*XbEMj@=nUB*oxH_ol;(#$iIh4RlnHUE&@AG!DETkFq-!D$_f5#V}Ij
zd~Zq=yewT1TQxY9@al(7P8X(RjX?nWraj9Gx@XuOl=5bPfsC*&(?x@iqCMg90~1Ie
zV;Dy1e1P+ve|!&epv6q##%Us#H4(|0O3p>q|IVRs+HNwDL=kL>C6zS7hvfN&;xvuD
zfmV?V>sAv#qW^KO>lz~waYG(L8yB<SBAf_+${u-D6`1t-0W_@Y$ydxQxzLn#U|H}k
z7w$jBroM!+5Je#;v5RugcfHZ3WS!~`Yv{WeDI!ECe<#$&GtUSy-)XWN3HT(aIEw&5
zDz5-|Ewfu%64WffmJf7}@rIAlQpS`0bQX8vcyR->`r2De>0Y&B<@Rpk#eC5XYyFW|
zyVRk=)%+PmvvC*pu5bXS=jP*wj~_gIY!;)|z&)IXdqf0z-;v%!3BYvYY>{C0A_y4G
z``h1ae=#8QtI)d&;B@$~`RMW1qvk^+YgVwGt6(-k4yBkMb`M5ElZ9eTG88iuD@0Ao
z)i540#Lc>wHlWS>sH&8DWg{W|3Je|p$&4zAj~s`+7@n7nwpRpzqx}|&sJNW^b1WST
ztj6Wiw=JFKSS;#GB`+L{Zp&llna)CV+jQnKf0cV?z8si6)=KS6U}WT?8^)~ZM+q9I
zc9z7g5odwSa$NaMv&<~;B=<o#$Cq1AyiDlfyQ)(*HMK*9%uvz?cdqkkd0X<)C>lq~
z+aRR{Qd*iJA_}8qp7NF<u{|TkmQ#hM?##LNEOiPH`}7ojjLZHWOV`@yZxX$&4Q3WB
ze{@;&u;M6&bXHaNop%REkyY9$QD4?Mn$FO4=IfVbK~9mHonO%OOxDd*IzXKRtv@9U
z36&hrVjm@$@h?+?KVM8HH*aPKQMmSPA=CN}u%LL>k3s{@0hO;D!Xc;)P$65t`?W<W
zT6%8HSe(YXyhTG<=amp8JN;$%?Cx$mf4TdVd)8UMy=2yFm9WpDyj>1Du*`A=jaFbM
z8U^P`3wrtB&B^}Dml}b~t^&)m($itnqxsYlNT)P&hfSZXo`4=^fX(jChRxlQNt3eB
zmPrY`3sbY7ex>TTIm{-SD`Hl<a5L^QGs6D>efVZK_hvH(MJ$iz+xCNA>l$vAe@HlT
z_*~a($MZ1A!l_MNCy8K|kOcTgf?22W6`~R`Zj!!-fQ}{rW?(|;muvx`Qhb*h@G+Q9
zD-G|?(Lja8jqsKWg=GXLBWA_@(cm0=Zd)UYQt`#n!7kcEHQe=HGVer4a<_uNiM8#v
z_x38@o#!lC2-1beUAFwL(1vJCe@2aoGy&qJq-s!M;>d8E_ScicS8reLpB!phHR?yB
z8x>E+5TQ)i?Lh8lFAksmxIz)K+htjM$z;}QcEk(y1;5qKgL$|KUk-wsRujLxf~7QU
z{SZtR!R!Vf{0o+1tNG32Exs%KH%%YtkYU`H+k~(eE%!!=y+P1LSuaFwe;f=Q&M6eb
z#kD_58Uz`yTb#~^F#1P_#%-CzMB@7l)MRfnhz(x<aQN=KH^+w+9RPi${cPE0X4E3H
zv6-i7!hH55<#(X(B(7Pq!HX)sbBSWBJg13Ptkj79)8Y2++Yw$aqxCZ!j^MC~p@(Mk
zAzAR^y%)05XtF?S3P`Q#e-^U=<x|Z{&)Za^@S_WkOpu!3(U!D27W~`i^g)IoEz;=&
zSj6sjxXriH5>`A6@fad)wyDcjQw^$J6Djag&MMF-=J8bW$1$A9IwlvpSAkui{RhYx
z&yQXn;x#w?EXg!5X?IWCS38Y%y}j2$fQeGM3W;C6`M__*jpVO^e*`Uk`(BN*RQ4cC
z`%_h|#xans>_#J)Ol2a}p^L^tSRx#~k|~_@dH*qUq+PS}Itdk+Y*!umDP>~S$IErp
zNQYWkWi&n)HL?z**Ci#3$ulrjUefEOf~<N)>5hu|K9#yeh3C~vwwY6IRAv4iP0QGe
zRn|E6BB^8^JK?Kwf9fRGqNf#emsMn>#h!Jci7Pu%Be`iNtx_}ETdZ3|4y8eGo|SU$
zdkb7cuq_y=#BOA=fA;AuTmO=M@5yAe<w_TaBNV+AqM89*$bvdAQMYJ)>fjik(*2*9
zo};oGxpYT7OQ4Q+7<M4zi;3WhRgKG4q$K#B`IHwsV9LWKf1N8AEGt<M&B7icRrVVx
z21VuQMmRj%YUj-2hfMuEm|io^lby8goTE`Erw>~t<S6F_;`+FKg_S!+{XtNlUgN$*
z+JP?je3{;Big?XXP5i`FLs_|1;wrb2I8BOjcGvZ%$vy9;{5GBYG@9|lgc-G%zeoqz
zeMtbO3Z}p8f0(%3uXBmocNyO)pzU48cyg^UmK(vdvNX1o;mhec4wSm0>Zf=%>L1U8
z*^6lUBA%|uOqibP<G9kUUtvM0e@A0lI4RtG97fY&JgpddEAG%jaJQE&uElbCs9S{n
znLNh{@`)4i*4?#degeG_^!p!zS%k-^cDaQ2CZ#Gje?V?m+_N01vAwwU(*tQIP+ZB_
z$Ju?U35k-{p7T`ZrD;LVR#M;qQrlF@ZHprW_4na?&b-wG?;ulKCZec^NjOj71T;hE
zDK1Z;-0{=#ix(cvMMiXeMP8J_g)mgLmjyX08?&4RS*@@@5Jd;08i(4z<jwkB-lHUH
z>j!O`fB1Ijb<SXP>QdsKQ0!)h9W#eK!&^gxY5q)8Hm|5Hn^(k`O^t+?$0b=}<lUYF
z6hwSJrQ+dQV9)Qqd$s>A=tuEdoD?<wv+g*UVF`?JBI;mb$Y#vcdPQyjtxI8R%TRha
zMJG$Vf)hl1g)Wd2;DPXOyN;dED&sNl+ka+nfBR%<!E7Cr9P;Y@%afy*N3RccQN;GX
zb4PdV^o{V2+}BRQC=uZ6O3qMdiOitp3DC8Yu~YCEkQc!?h?qGp;q&&ov)1;*M~Y)3
zmJ0JNaN>2~5Bj15c9`{XG%HaKGuzDxT=V7SrXsiGJf2+U?YQN{0;#z{F6z*j{j?nn
ze~<Ew;$D!-ME8WVn@&9F#Ixu;nmDs+0-MS@j4_Uc_s6FS`%U-SdAZrWcy<BFXmRc=
zK$iqncG|o2{G(vTE>k<(*UobKi&?q{<U>7>Ue+yz0$qI@&nD|Q0o?%0fvge~1T!qX
zE^S}kL*~izGS*2Bmrlrg{XPttsm@D_f5q4a!ro#+^6v@kNZW?vH-(8_<cH%HiJ&Li
z@IMY8n0{Yd_OU~Jb7a7bxSyg}j9-mWtbI?}*6^TKqVcB!)9_4iE$S^1B9C{J3dZT`
zG(xw^Bqkb^uTM+tRq3qN(gKFM;67>AUE9-e<!ghl6iUh!ZU8zY#prDib)#r_e~E{S
z4%2p*{bzA}5g|kHQ5L)mXE1VMztvpjz8%VRZtDyu12;I2fH2XLPWgq>cFF)Uv$p-q
zVCMA}Aj41QALvxm@+RSxN8j6J6nfj0Z*_0Xg>TiSH6yjd%ew77p1rVl`IUF|m5^vM
zsWb&`Mw%P6)IwgF&`2WwE<P!Xf1GfF!7Y!Hbq03064zzE$Z42^xirv9<s`Bsi|#m@
zBZ_GE#>%6gw`8ltvxH+>vfV0$Ww{D0`l~2G|2=COa*69EsY8)u9#lp@q}1d^R?guk
zL}YHs*=5p$H$T08`DXv1^YZBUM1b0r_maxf#Yo&&h(&|Ay6mDvE+bU(e{%KOVx(PJ
z7zyY!>Ru;y_1UHp!XlGG;#3fjCz_V?+RU&z$W>9?=F7w&K>uhoHqdl(no*3_A_4uO
zVU+Mvs*cv`JeGWNC_BR|DF#w{`0m}CcRPsP!F=sfL#Rv=Qj1mh=6uPz0i?eE4)fOZ
z6g@iBsSMt3-5QYwQ^Yc|e{e!Eh9kWn0V_`6>qSVn<~Gg~*J}6rlV)9XR$T~2)8PcQ
z-6-O0Z#B1@TaEk8#*?kB2M?PM8xNjrO*{7=Z(Vf66E2EvY;V=zzt;=IK!7jLqtQ5l
zzb8Zd^)?!f;wyT5KAa=m!QSMu+oJND&HGyqo7)e**#w3s9zEcXf1+8dxxKZunee~E
zAQ`r{nh&~9o@{SD*}C7{3cq<c=spUzQXPSZjb$D2C0poXYwT7*zZ7EQWg9O7<x9?7
zT8R}pSJ|qyY<EdkzSCDyq7$ne3}oWLnfTR#UprQ?QG_FcOk9-PGGFpA7V3R7qs~&T
z#BkHaBijsK>|7{Of7a;#8aymlMIk9wQKw~%&DT`6OXMvaGqsbNxs@$No6Q88p|mT4
z&StqNW(T@ZH5F3(FEu$cGJG;MleysL$h7%L350VymIB%E8V4zH8ZcQo3GIf2t5DI`
z0a#CQ^XClEoD1ICk-{>zk@adI>N+)UAa!x}<&A@dWkvF}e~NG+nA>I4nmL8Qwo>+M
zO%mZpBis8+U=g#Dv_fpfzSYO_5a&9-c`1c4Q+c`%K}3dogXTw+urI1dg|pn9ZLfHv
zS5U9O6${AdV|2Ay3v*OtUQ4+kbCY_ZtS~U}5>@CHAVXv9QaV)14da4$RSoXfirD0E
zrpUZ6$hhOVe|?x_h1w`@$mF{e;x*omc~X)U(K5&|9EYxM%$i0W=E7ifSt(V0tHGJq
zd<GzM+ESc68=YmX3c`Fxw3_c}eG}T0)A1G$5Qhg6#k(I}ijxZ~H0bx~6j$2a$8ThS
ziv`hCZll7?gCm|&Ll;6!O;uh^+Y_vUjJo6qti5&IfBWCA98n0X{`RDYEYkuhIZ%S^
zxL8s{`$P2m?`vvke}$~;YG^TU%9}-U%U|j+zX<Z~KaWBF*=%o3j9S}iM7zRYYL%3w
zXh5REaTmCWp;+LugbACV8>`i|$tLCtmE+oDU6=|>1eSbl!Xkx&s@kBBCq3<Gu5qkN
ziN!5@f5kCXjHGLYnKf%)c|$B<q~f$u2`7jT`u^R~GuWn}V<&TXg`>i}ri4zY%@ns;
z^Tut$a(jIG_BpE~0e8=0)R@Duj1M28I^qOfl(S&;m06VkWKIcv7t58ah9~0Sq9D~I
z;f!B&Nyouo8h79)u;n`(QS@)YX+<_^F6(w4e_WA~YNgqB3Z$2EuZj}-f7Hj(HM}Qd
zt#8|tk|EPTqpnT#k+gZFs4c?+0qZxEl?m4wq5Xgr>v5|}5hU41^R~sneAG|u+T4J!
zqM5>7VMPKqd)PxQDZ!sxVTtsahDKQExP-mA$JRqD8E>-MTN}wH>xUM$Ysdl;vDQ>M
ze}$Us0n9#LldM(WG159$3h{_${_{8<Aw58Q**YDgV|P9nhp5+>66w|Mz?z{*+d4MP
zM$!;rmtdr#d(BE!?`+52yTY1zPVgEAnw?#K+H<{=o<X9)mcIc}!BdNjrwbp(4}I_H
zwT=IlZ}p){WV0)iP3sBScS3eyR@=<be<g>3>$znBS~87U_+E|fc=RNhAOy4Es^coN
zAZh_ij64;)YZRkJh%1+H6MbMquh>Wi8U6v(@dGtNVu0X0I6^Q|K2Sv**R}POw{kJr
z(v`c(x{O}9R&g~o9a1h2&}6as{TPN9#VzFwJGl$UDgm_r>mwX58u%z_R18N3e`hQh
zO!zf=G2pch(nig~FZq59`n76K88R6T@)hh6jp5^mx9b>D_-Ii8-P&+#zjZeVUE6_+
zxQ5HV23u&Y8^xvF)An&YIlH%qK9{CT#67p8;3_dj{sgasv{yR4EhYpPy;yK~C&)xg
z+}BSI50<X?*39>P1)<cg&olkif8kShJ!EfaD&0)Oj@Zt3#g~Lh!rC@ZE>9IWC;fym
zATC2MkU~Ia1s@Ak4fEh)7yn7D(vxV65Y_m~RZJQz6pL+)g2{P<#LdbcqE@lE7KDL~
zD=6S7?U-z#pR=_OwH#+XF-Ium1;V?Z$05VP#y1Rpdl*i<5@}*eu5w_IfBr9VEpM&i
zSdPaYsr42UA>+b^`2bQ1W}breo3KWnWzM7~ZcA39J+$OQ!n~L1CTX|Q)V-PTydahp
z4`hWNf?bz3k`nFxx|ocmR@|33{60`5W?CHCQp#D?4`FAzIi@QYjgl*COE(sQ3HnY%
zDG-W}L|D6sOgM{02F4xVe;pD7a<A>Dqp0mcJtZ0}ir`Z!pVnc|fF(&TcKA|CAcC<t
z7Z9vxBd2P|JREnZoWgm>VLuAcJeXBZKab%j^Nn_wkZwA5Cu{%HX<PbxLrZJi>D#dT
zp)SBHlLqlOU`t$aTVX~|gT-hxQO@DP?$gFpGZ5lCXRF}Ryy_Vze=a#8RVX-7@R-k)
zmhS#7#<nUpY}cY4QxCW@@}8+m^<r8VQNY<#i5_T@iJ`<w2r~rIMw9&|uO1Fcz?|t<
zgeA~Aw(3cakcKCaRAW57$aSRVy;$X)B7NU^=T>=NWq7`~csfG@PBSJkg<8H2naDs<
zn9$3rk(`vLw~Db_e<My*G!$s=&fTg-cIX-qM0CV&&Z?Zw*mmhvOaEj{($sV?ALzEz
zir+gB_zg}*8zvE`G`MV6WCDZ;&H^Rq8{~3K6xSk96J9^USewR*Q31@g+lCFKU1Caf
zc?_ZbL(_ES*wC6u?|2-42nC^N(i<)MT$B+~5P|^-2p~2re^B5g!H9U;VQTr|g-|e%
zF9)Of3$cJb`=Vskb!+2qsg^Bum3qpIdC-3w&8KlH9KZo98sT~_{{hcGAwiU=C`pPO
z<xiE0HjwWrg+@k&4`H;uM6j~p^aTVzooW#srz`YhW|SUOxw3^|D_xxjX?I7nQ9W%Z
z^|QMyOcDQ%e=sEslEigfEkv&iiet%9kwIqWD%)5R?EPcXuc4WBt$B9pqs}c?W0ik|
z{M^H1I>L|xr{Ctwy36{jo8u|`T}f?(+sgPR!F=_8*r?xKN&EBa@MOPpun+&B4Tl5O
z%PO9KZ3t%6fbEF;zBhmm(H8-xi~!-&;dGBF)btBGf5zrvhlvC?OXkcnG-OcXLX_3-
z^6q$xWD$%;HzH~mp0J>0(TLR4P{y$8C9xNb;2(^nOQDYlD3?Heu@a?FA-}-ormomq
zrim-}C<G#@fU2$)Gt6Dfq7LKaq!4H#3%Rtjv?y7rar=qvUv#<JANd2WhRYachn`Gk
z<Y8YjfA&DVd>4GF1k^5o6K_;mXIHOfe=#4_p7<4`sWM(CI;wJm{#4VkuLh&bPDo*+
z`CAT{&ENUPkZ2iJxyeh15tVlu*=A3wE)(y4%@{Mpv79ZnH(U()BNBgJEA5hnbRmj2
z8^#puFgKi3%C{z^MkXh<(#+H7#B6I&y+o5he~oE7Nz|FTL8O&26K25lLNaq%lORRE
znU}OXk^=N8ddncOorLQNOv>%7nWXE&gOzC+<$6|=?pwwz%|j;5HA&AmM=!GJXNMFl
zsxv;GMj(cb)<#rFSpGI{&1@4!V~D#o15|+-5TTu#3b(qBZN%3Dv{3j?Mia)zaepxi
ze}%Tk2+C;;YTh*rY-RSn7Ll@D=L3P`FhEJ9VZ~fs_!LdXC<*tu?6&i|mc^fQ+}Qv8
z3gMY^FWoVTetT0eqKlwfjWoNjC3spY#fTY=;y6ES%XE6E7xCV&oNk?2T$wxOBkKl=
z=VfOB%6(rHkZ}!JWV8e|Rsl>55-(Rqe~|cWH0BJ{h~@WQ2I2w#S{RG@Wpo+73<K8<
zwu?0hn^@Z<b_B(Ypcr8j8(y{XEiS&OWvG%*+gBaNrJR^qqM*NtHJ6hU+IF*@bu79v
zxnYixBTim)1E6Dkbwzu{zwrKB#c~$Y5vP&IVkbJxaa~^DUju}DdaR0E=YMyDf9s@-
zH<{}I78Zkv|57;dIb5kIT0Em8%cW6cWci$I|K0Ly`3$>$C1(Y*KHWRbhiIA84?pU^
z5P2t=hxi=t-eJ@<L!AyYNp0_O5sbpzb0gQ2qm!51QDxYKmqJwO#@8qDRH|Vsd$t~S
z*yNW@omYnkNBf=QlXr*vuc#y<f9zR28ik}*CJ4S)bY;eHQOoiUjdoMI!mE-S%x?=|
zW%C>Ugl7G0KAv`j5By!=9iDdoXFjKI2<o>AgTAAMrII=!I@RlDkp%=#nRc=mbIWsO
za}Q$40;QRweygmk>22OmIWw`2>8*l>6Djh;^5k)Qz3Gg(qM-l{jK{17f4P2xFLasF
z08m)AM~F-ka!+;(%TF_~N@Z4TM>dgHP}A~Gd5iiHjAw;irLjo7>Q0fBt4~W_{F%|&
zaNOkOT9&K{ET*~Vmj=m2iC`WvbCIq11kEV;B;tV@6h1;QKydjdJ6l`cbZiboZy3i=
z9!%H(Jt^1-kVK0EGK|iLe_=BB_~M{8PvR@9b8xVO>+)$r?-L|j^co(wCoua@OKQ=T
z&KUZGLsLzki}0p33dY?&Oobhhol{UcGgV@w3-uFkeieJuI7y;z1j(WhGQK-`^;QrF
zWW6{!c`Hu<slCCHQ|)uL!Sg@qL&+*Q^#!l6?KV^P7etmkiHN{!f412#n+KwB$#ML%
zI*Xrn70ribq-Mv;NtXj9ye>Rr#^hK)Nl;fxoBy7cD6$u--1IDZojL2>B(x+mmCouI
z*Nu^hkPL;$)UxOAYX3fXUT{~_t<8S#4{(=iHz2#(!O-Zc8zO4Kd7H!7e(nawN?UIS
zGYm1*R#U%c?Ob)%e`#<ot<>)|plJ<#7d!ohyg3_uhpOKbArfG$;g_ua)v;c;tK#t(
z17`Aac(!*Z$8Ys~W)7I1Q;`LSezi^UulGS&;n1?yZuBK~6v=N_=!e%aXZH(&^IuH*
zb#re>PJK1u{4)l0Ag7)0)porF6zu;*xSokef|mD|?96?(f1v|Ox-8=^bLc4Ze-=^i
zf<nSG6GE6%R!My4B0rMGn){c%U+u`^DcZq(*f48eKN+F>&A1zx2BX;Rr#>&5HwAJq
zf|vB2BWfS>m1rfYH>TPD5|S^&*$uXu(Y;-iZjr4R@zDF`pt14ciu9{^b^(vqZlRpc
zomHnZ-|f?Lf0?z0@P0B^E3ebo`84$-8cvD|EFD2%Shy65b6O1jEY8Lwbu7E8GEk5l
zAP#!RxhYorMpf(Eg2ZQ(sckCmLYEd?yl2>^Mo=(pv+~RkEAXCcl+rr@yWPz0As-P9
zPwQtqm>Ktbha(p+^j3$ME_bZUIi|AJ2`5<Gw4z|Ce-o}ygmB7T36>a-XqQmwcb}B=
zz+&7T%~tdmdi%6HiqAXD5cTd}rCpy>G2C~k4CjG)<s(~$73m<0apstv{c2)~bpKxY
z3W(yCyFy?h`E~-?BrD{Urp4b#ctfe3*h&gfhi2QD+a+?Wtz4aj%-WNB9<8C;?d$Dr
zQ{Izgf0+Zo5}@)G$zz@kl$*9^RuH?i6Dx%V5g+HaMbe{m9HFpibzua^Lvd=26~6PQ
zA~H-G9LBJw{X?iC*k`Oqt}R?<s)i#S&Jya$_Ng2Q6*z_U-O9mJJm$SJ1%Zn}WTHgn
zvD#7{smR=M&}}_Y8titPX{~kf5?x$KQ9)_Ee=9O;x``JQd>qYs0&)6|<~|cG&{$~{
z>NqQR$he916mx6vVpIySLK8S?hsnfeLJ132IT~??fz@^VY50u2!uT0^5p-5&$T|LE
z1zo~2LtkaYP$<U7zXR#97mQbkHUo?Uda+&a@|=u!kf&+GI5dC3Lu4MFl{KDAI4HLS
ze^-<9CuN^Z(kCz#T4J*>R7xO2!8w*iTY6l~BksB1LJoPQm4S_-6||xlP1K=dI!8;@
zm#qfyO2V|NC9NJ#siM)8`D7N1x{L7vvyS_Nnh^DOJheUVRsIQ0a_a`Qg{6mp_^4Ku
z8!y5huIzC%8VIZ>!|)*gV;CeDgPjP3e;MZ-K}7%vbHVIJn1IqsV5)G8dh7ifTn2Po
z<e?>kt;f_m8-0!7bVwaswS#LIAG;6CzO6V0Hf1YL9_~8)sM=D-)BCmi+gsx74Qmes
z){(GHgHd2tHN^G1p%(^81bYJcP~x>k*jLegpePYdo)l%hV{m3sw>A32wr$%+$Lcs8
z+qUt<NvC7mw$-t1+qTh3Zr*e1-0$4`)vme!%vp1-8b4O8+H>qN!eQZYY;&;$YsQvG
zxp9HH2>_gdp+YU<Fwr|||FxhDF@`CU(2v$^i3UY=WXXGKWH+WjK<W5G<P<qSGEINH
z10<Cd2~Hisx`|^_C~+3c-zPop(<TovPAF*?c}UQgkmj_>(wcm$-f2$}p<|f4A>bSC
zd+osjP8i}GZvIa}?7}Nj+{dB}h=)ZraBU#@qbfbs5pg;=nIO(jj)~tFQ^8me+O;cC
z!SAqtw^h-q;B+W1HmbhTEd->9<NJ&t#D+`=Cnn`(o@XWMM`Pi0g?97<<tzof9vrOS
zNB4)32swysmzd0@E2L42NGuvi`j}6LjOj6m_9*;sjj~Vl#21Lr?F*o*yDYHje(eBF
zN$p2!(=g_nv6ym2JpuF@7{|Nh<YM|4s#f%hsAv6>IPDQE8@pu0dE?TZ(eCu8%^L89
z;6|etp>x-zRg{ZPQ9`8zNk61dQs`DARR?|cWa-#G;S{Egc|1J`oP`qVeut}Qfi=*~
zh)ToI-e_XF{Vo;Rm^K#RC&HDYaE1d~i2LGWJ(af<5<fCt<e-O&B-Q*L27MNh8i#)o
z^c!codJ(svO)g=GD`G!r(rG)lp50_p+2T`_bIowZREq%$el2uZu`6wQj9%tQv|9HO
zzz_geNzyk{IsD(0r$7$a;4^N_#{H26^59`2f4p#}1{bK`Qe-F37AEdmu&V>ViS<xK
zWKb=WEJO2_xyEjJ^D@I6J2(=2W+fKQfmSs4i=ON-OPXo1Rf4Hijm!bgO(Y7UG%^OJ
zgug8$$2+o?G_&Zc3cr8GhG)`;WL1qREkfX~mvn%ZjKU~(A)Pm&AKLq)ICFDcDW)uU
zU2A3(=2-LZ6ztopx?2nhS=k7fu^H(1`Q~y?mi?*z{($4f)uizH+jB@2&D{KN$gIGZ
zpLKlAAl9uKlho09rDQe+SLZ+RGMl}Zdi?rKno4a%*joq}3_1o`6!e7!yaJ|uMGnoG
zs*E!%eOE1vmadyh%dMwp#Xs;s#Y2)MjmSD-Y+i}Kc9Br~(Jjp<^-<=)kl*wZf0WaV
z^ZjhzZ<;RqpPTiSo|<g+HNaawFfjLdIXfXnNLNyu?IP19)lkN_0)O-%fCf;a<F0ir
z{yeZwTK_3)eDf(SM?Vd@GH#%oXu-j@LouZhf#VlWX`O|`OjMbL(Y8Zz&_}V{nmBQN
zR$<}?^V+3|7O+$kvx`v&oX*iVgl@*c5^zhU2(&9!7!K^7`D-ZsL9^5hJIriF`DC!H
zBQf65Ii^PlA=@^j#T^8{0jdPCEP+bz!j(8W3R_!G+F^skHoj{no+-|u4#`B2DqUB5
zTu|yBX9^oTKA<7**VaNxho`7VDI^zmn>p@`D@@;R(PW+VwN%>-R9etRaoB^q;hSe6
z&(eTuV<081ub%N1VUg=h_Rjtq)UZS{^9|+JAkF8cpNu|E2#dAfb8blCZICw9*-r+H
zVp>?0;a2u~GPe)iD?xpTOjGS{nVx$ks1ZJDCkIt@kDe7CXZrb!734J$+S}Sru}Iu^
zEcX6-#WY>^bSsPpzQ}D*xS1-1qrr0g{KfM7og@Q`N9WRlFd7Dw<}AexCA&Iw{@oHR
zXC40MTx8ayEEmEavYUygd&3emKEs)b9`0j7E*_r~;ZrEX-LFmj{{7?;lZRSwc(V`8
zhOSnaP{&9Nd->e$Zy8QXZg+Im0@kpA{wk9wQAZRKQ&mVX;J{_o^LAGfZ_n|yewWW*
z;GM{2mzJ!c!fd)BctX%+-fgBCgl`vHC438=z8GLs@RB~^x831P(?*;V0jT}^K@yc@
zmFTuD{y>Kas2mH5SsU`bDWd6?a$QM*=99&=4(bdH^iyXmR*i`m+i@Cul4=l?;Q2os
zPImo16wx_5pe*~|>(~<yYl-&In5Ljd#WI-8!{6BW5(h)W26~5o^(~`|(UD&;$Bomx
zIleeU7mzR7ad6fa#!vBu7^{CXIh)V5Jum{LsH-!O!H`&OQxXI58iHf!%L;PaLDVY$
zDWeelkipd;>#7$Qu4GWDx^Xvp8(i_&t@=@So1+hI3}nyKJ*M`ayvOXK3nsgGj6F2u
z>*4N->>Fqww_mtsMJ}v4SX0!d5A@1<i7M<Z)uqc9zv|2t?Xi0wy{JbhSYP_tDh1mP
zr>xkNwPE{yK`K(17eaDaXq8NIbUeN_Q8oFX6cEqi)44U;SN!|i8HN8IOST~9><}fl
z)a)_w5O|&AmV}bHagTR0b-68zWj%@g0jZHpOPWKrfGk2+)kRV)*KCM%XXO+yYoxg!
z{}BC5FWhiPZqlsrmW?iFm&(A$)#(K+G}U&5BXd{-OY$1t_iqAC3K0s^d`awxY%H(j
zJh(8TjbXpPK6h;o=ZtnQ1AmpZ#jaZ?Dl;tD0hVO}Sp*-4gA|g|KkO#9ZTYqy7x@%?
z)}?)K<)5v;1mE}lzWp{kw8wPHr(o>F3V&KJbAK>zH*7^YZ=F`MIRC~$$BqqrVeurc
zRbu?52hji{lG2tML72vcR$$&{FtM???gy5%8#S-*Hhpzj>{9Y{(Rfs>+R@LcgkgYQ
z1q>mQ+Z&za)+4uW9dGpYi)rmQOP|Q)e%js%y}h3xzi~Dfm?1&g<mUKT64+r4Y{-(w
zCtZL;mK;7GQ4pPIjnBh4WX@C&)A{-IZ13iRS+`-+ndxcpV_#<)soIb^J^WiMl<b(h
za$o2$A~asI1S$0A#xyFi85yFUjnNQX4v+x?<YMz)#+%|rok>kw!-OEKJVdC*`TVqA
zubS|RBy9+-Lpf-;U+%P)4W9BNj(w_n46%Q(CZ{|&^|usd-`LtR9+tisiS<%#be&M2
zrm_&y4>KOb<0kWmH8<aJ?q@hudLI}}BxX%gDrPWmH$S*yVva`(UG^Et0x_=-KVV|+
z+&|OA3G0<xREPxvap7|*LT*)eLuaq8mAu&F20S>76fG%7S@>&|s<i0X<k(-P{7DO$
zr&pA_u1vvCKw%RbEwYebD7Ps90w_@w6FRXkh~{QLZom6AVHAM$uNkLy>fV}o0{bM)
zOey5YrsoM-9q)0%M0#dx1`u9@B;eG8CKxG)z8J++Z@BfXV$4l*??o^kdO>3_Ba#n{
zVW~1tXL<G$x|SihEKTzhIfL3xzuw||KX53KrU^|fas{N=X!6IGN;&3$*?BF6%9FQI
zQ7sd$7OV@Hc%BC8p6jU$=F5!V6nIin+WmZC-d)KAuTuC`7RjmqX&rH5ArNB#WtAyy
zPbxGgm@zu>`z}{xLo!fgu=|PplI`n?%jmj)K5VVYXgHMg?KnHpYyTENbcMdQ)7|{B
zS!HU8duJ&mQj>>>W%=hK6pqVamb~LEa!=0yU%iUpB!bz0&6PnnBbIsXv&D}?Jc^0X
zgEUBAI)RZQl*F#=rX>>V50IB<aPfy%UOVEV32HpO_8%>sMPYzG1O}`~_pAZ(6pfU6
zD1jnwB%|5|)dO06Gf4WJlLXh!3R=&`#V{?}vE{4NI+0)O#<TmsVtw(>>TWVG>HSSr
z?^#=sSgl97I7Oy`6^KaPLaQs%We$xVjsq|b>3S6I0F1H1<O|Q-Wnh*7(ugYqfq(Q4
z{G*j1TQc}a0&1XaMR6r}-JDcH=AUzc)MOt1y~k*GhGNC>;TvqV1i@RUdB9^EK|(#~
z-;k83^{AY_ViPk4tK}C#|DOb+JN`4BP7Art3=I$7WEKKuKc3<syrl$<8vC`lqfk>F
zezSjytiCFUQoo=XUIFo&P4RcO{3UxJ+6B8Jml!)El8^DSY<ml%rV9ig=n=>_JCyaV
z0>G}_ZgU9S(ozZ(Wik4hKx5u?s4SBgmjW&KM4u5y;5a5`Zqk;dAv|x4Fi8&Pcyw<d
z1QMC`mB97*v)+Ro1M?e4AMf4;-RF?{YIE3cs!gVksqT^hNWgk*>h5dEvqnN;rgxLJ
zVHcFZIP2`Ngds0grV@M!a+CumzE-{?4&y%d!GAVF42UH2g4T2jRd1V--?n;w74#oT
zzw%$XD|D0-%(}zSPu9&u5Cm*@8ZKDzDiv|0afOU`k*ljgJ%rR2*_VtNt=l1j(CVG+
z`qL-VbB?M|D}YpR$V0AFMn?`|@^g*Zr@S|1AdJg-o-22Xk4j61Ka`Hy&M%$NhzN5~
z3H=vLD(fQ_U2XIlszv|twTl#%RLhDM@UijE+u}&w$a6XDiIS(S@*ngZsk7jgi!4<`
zj$7&r)UJz>!hVbXH4ufx^%4!zL*A&200K}k*$E90K7hh|pd1)pzXwmierEwHhUTG8
z=NR@OB1ew$dzs~CilfLbWnlgO`MeDaDawN&(qiMl%V`p{3J@l7=B9-iuwxUu#!m!E
zGY#aBkcN7V8u6MI(8(HFFa%$6^y3kL?|`OZ_^Nj;?hKlE#T?`)LW&M}N)Ia?4Dv%8
z>Bfo!Lx3-|c#kz60VfdlZPLtAfl1{bcQ}bWd5S8h?$>Dbb5h;hl#GWyrh>r;yeAM~
z=cgd>Ta(;FD9rjkM8n5q=BM_vUElhHy1{O2Gj6N5BWfcc7mCvCK*S6)qb^Uvhdw)e
z{Zxf+Jn{Jiq-yii>{sL{(Ld4ZT#cX;Vbew{`9S+@LxaPfvq#YmF8L@l2wQF>efps~
zwZmzffHb5Sl+<#r2&@m|zZDB~_dJA1pTndA?QZ1!p_+)t6Q3ED+@7IOQJVNMRgrGM
z-o{o94VUiE*JIUuqWo=OODO-4lv)`Lgyn6N^<D=7L*>>=7>9?GrJ(o6X4mV^e^^DL
zz;!dG{L?=kR$c!L1N_<DDzcd<6Ee6Of~Xt4so^ULJ*WBSCc^%UYlApIYwd(Daoqn3
zQ+7LvB@~*+!Sy}Rx>%(Q95{Oz5q4S=We88$QoF7M#YyhaD*})$rUZdDe>=-5X+>IM
zrXnSVwbw;c{t`TWDNM+h3wZopL_ZFEuzE2q(}P(qKVuu=XFaz^)}}t&74>qXY!2rc
zrMAkUmL!2&U~q?P)@1(tS)aCdb&<<xKRsnAXFz5Mp#lHlgYF)Lav@Uk??d40WJc(H
z_zOu<77QF6002M(ilnMUI_=m_wBP`MC~5!z4}cHww0CuJH8RyVv0+lQcTTy;20;SC
z0Kkf4W0n3Bt{!jz5bzT)0N}q~O<K10`<$qs+4{Yd8d7#9MeBqPVLPBYvsajK56wT;
zt%Y9TgOV*WTIx$N$~>a_Kc2lq3%4ljo0(o?rO<is5pz7vLMJB8-IExvn_0WjUeV~L
zj&koR@S6YmjG`uu`2eSB8-UuMQCij2ZbiKHlV<8^<>(m*3Bw{up|@4PG))JFI`ws5
z-HOhsbZ!4AQ;4)M93Rb&6x`JBkB!JJI|+5Sx680rb9k$+@$-U|!!JGwk+S@Grwh$F
zKev7xOP`)V{9SXgyd``DX_b7FX$!G$NvmvzF1tuGjjkYGwkn+t#s-8Uk);FpazX^#
zM3}un@WHOoM6K39#i?vZ1Kf7fFHIO9LF(I*@+yKuugWV=u~{sc)sk8Iw_!RAX4_Cq
zr`VG5TPM=i9+Q^kv-nNd9I&Z&HA6q@=2O(RECyAlz!eZ*=%k>#h{!Bdt&EqC1`T*E
z$<6i;yLcluG-g9FRe{?6NuckU#yAT66;_6o)Po7CNrdam>;KGMoUCi%g7cb`#6ftR
zmKP&_Q>~2u%wN@hFKSc?=WmMt7b%~*aPlYQ_ru=Sm1=Ss+Wy5GbgEH6(Tc3Cj9uYa
zP1?3Z@u8C0#w82#4dr&3I?)UrOI2knWNL=}{OTc8MUAz>AR<uy$J2(52t&Q@LT^9V
z<Go^&M$dCAQ<<;jp`{^jmV`1NXx^3r=s97JgZWRG3<1HR#z2M49OOgFq*^)8#-;P=
zH{-)vA&3B6$mOirWrl@ZO!G3i(zF!wWa$^cXke8K8L?6>2*QfZ^vB0+9Y*!t1w75)
zENby==y)r{jTN9Nmk6}lLA-EB?(GeDT+xy@@v9dVo05ZbtD7hs1k(aIw8i|01$-jw
z*|s(r<)fcESQ?uOMq2nqVCMF3xfNHi>5`ye`(hEQq764pZ(iXDha2^x4Ot`5^r+LI
z@?4gUlPqd@I8%C~WI8aCH0Bx0Qy-{MyTZeWjRib2lto}y)TRPDA)Y{C|4+5TOmbJZ
ziiQy`p1Rz^r4hrinwOF3$^+#f>A%zfx=z~hXwE%R&`Fj|X#bJMCy~7x0;v6AT?3Bk
zseCtA0sMNrKyZVxiU~31Kh&9J7HMUq`}oA|h=euzL~wQ2y$fep@+TK0dImZI|ITTX
z&hV0gD%pXQUx|5WY+;>TLS_@h_uzBxEt*@J@ceo{$hcj0`pP|&EDMRMq4sQf{UPgD
zK;mZXwAK$>)r_-kS}}n&1rkC&qSN8TW51xq@=@qLNwsi(9YceGz#=Z^+Gu1>Wwo?0
zyJfDJ8k@h=<A(2~kcRjbI+Q(5ttXQw$lp34qgTKM4ZN7fW&Zg+t35Cm>vWdmt^>*X
zBibDWkIe```aRXl5}LrJTjS-`sIMLp%g+O>P(9U!rW|{1dis!euH5c4_gUl9sEmng
zAtlWLgT^QzZRFJj<VH+*f%riAlB=z+`O7Sm<tA6E^Q&e}d50YAk-UG>Ulq0J#8VbD
z`~sjUVj&3+pm{%cv*IANuX5K)1}2`Xh$rAkZb(n&ALMSCY6m3V!9Qs0KKMV64&C-u
zhAWq~z8u(>#m;E9Ze6Y_VRg05_eOPgDT=Wv2)$9RzO0$rP|f?K%LsFSaB|Vwh7k1|
zVV_pP-t)ViqoAJwq`A?C1}Z+RhBOh8RHVQY?`fs$brz{26BI@xU6Z-s-tK#YI;}1p
zAgl;+4cMONJFnbzyXWvX17R&0mZ#qd<{n1O!x+{V=zc6XRKBsTRK2r(Eo^xm-1dHV
zm;!9lgAgewop0J;jIq_LqIri1P3iR??AZVH!v_l>{MYAX&f;{oOF`Y{#lr`lsuIXD
z{@MSJ<!-hEoyJy!)y5}=Up>9SehOny1A<f_X%70sz_O|#6Gkl?O$u6HfP>mMm*zbb
z_rcQu<PIMVes8u^z1y(5Mc=Ze+a5O(8yAEu^d|%O^GGw#<@}KR7Rh`}TuI3@&eMbb
z_1`o(GJAAPg4Zfa@yO9>An}%Hs0VPZhY2FTKNVC+#3;MRD;>WUzfc7$JQDiPowFUM
zZ4Scf?Yeq*$H(&XcGw@DzH?_dUi741Rl6y{rmC)!^ALucO?mu-hSc2FXwj$2OE&+T
zNu=bUd|s^7r@O5gMAozRnolv4K-$c`G=1s*G99XTP-WuhHrgL$x<egAjDLW^E-=)!
z^R^ZhgrS?mylu1c{OvV*)z;}(=07y~5<bQVce3m#of!v$l5jPc-KqRD(3*#<x9T)X
zzEnRyVR-%|q12H@Hov0Xr;Ij!x)(CeT!PX#nVV*~^$crkz$xAjR`<?vfN!y0&NL6B
z&6y4qUX83T`QJyfVL)f6s2~DAtQeQr61R3D%<TcD*pl$a-gO?Imf$pyvMz$4*j@FK
zSDWp0YgRdR+&HM+6#aTyGm)tqzgpsd8zJKi)aZY`JWZ}&Uuh<(7%7CoT3Mfvk1v+9
zSA|6iI9Aa3&YrE?7!t8LaiNKwYphHBag1>jKS7@w<M|BXBRyGxpZwN1N4wy-+DD`<
z2{!(qqZ7Crs~*mD>hDt5DwtY8StsQ+fG^3{OJvrN=R`kDMa}RbQyS#K_QfKK9D%W%
zV6tB*Fuxi_4gQ5e-{R#yT@|;R=~1^xq`=9~F~TJLuh(nhF9CTR#E2BO8+}*4kV5%}
zOvGjYEv@e=`6sfttp6F%9Vc(rV!_%;c9=KH29rLE!63{))g`LvO-%vr$~I-~Iw(Lo
ztnoDRVS2bG6=Dn~8b_a5fwHC6H}vI=rO+UZk<8Soh|TkSQ1L8GM*l4`jA><ib^Ld*
zdF9fjDR{U2?Ym&OSt3D=_10ZZWpDHTj|N=g2#D)I+1|U*587}b8nRfVz?e%Tu9-uu
zv(UTc(`UDv!nHScIRB95ekyVJ)1=p7=hymMMZzra^>3lPD9&eK1<f7aj0lvOO6NPr
z3;&)U*_t2Q+^0?AB-zEZ_;MqIkVK$w))CH4ya`-$jMOLC{w#%>8P6*#kv^(r=UzSk
z*&YA5^0d5!<oqZQsj(j-D_@yJnny<mle*|434)cS@RS(#4!p?teRWCZBsw6zIy#((
z)eastQWfTygG#NdU|h9a;*>io9=Y%XSN!Mf(Lx)yt<T%33rKk`ydM{wO;e|#z1zjY
z)_h*QA`A7%mGPtzs#x(JceRkj)h|vpPsQ!m#+=59dS^V~ajCEsx>GTBCSZyn`k&6P
zd^HzU8uoWST$%D^GYq0aPF{R`;RRd<%CnVSR}-5B392IGM*9l3@@Ol%CC+}mCG{3g
zU%!C%z9~q&js)?50X_q!^hG4NZ5oL-elCK$kbC*#?aKLAT$dlg^=8|oWl3#SBFIED
zoK-3XKT39hP7uE~h3-fywBeA>sz39=F>d3+Cz46WG>wJg5irDqoB>9Dw&NNGLg=$5
zG@NysSNN^4XLChY_SZLJXTRvq-Y#+Y6yeaDRf9>K+EM-?Bm>QqmI(RhrxApK#q|@q
zz;-;BcjA~Q-o;I9Di^l1<O@6CU$YCH)cj<4+eK+$KS=264xc9xBv!e_PUXEAec^T8
zWn=izV0D&TXKMS47v<CC_aN5kBlNft+@7d{Xp^92=mfKZZIO$h<BbCzDYO`e;tcvM
zr}T?dW!KQEHlzDoY*Rm`#IAf6XsH@fN_8@Q7XMq^TTc*p$eWt~^%h28$G(}31GBzg
zFeV3ts;0R#7w(2-M!UT2KPW9*VH<9lN?N|bQVA0w4Wg>|6MdBp-ci0;C~k%x2~Ba}
zJ2Df6f(6MIFL}V>4htg*=^{MIE^%<so}Xr*znPRWeFHXQJprZlh@}wnK^O2d93?E>
z&ReViwMsiTo22N_f6QBpt_A1k1=sviZkee;LdVJ4-7f_izKH(M<sGzhR3v$1IM9a>
z03db+05AdgDS|m5II!Qhb@r!5rSjO=1aDpiAnWt$=B=Z~u?C+o*G=B+iJgxAzkd1T
z@GKZ4ca0aRJ=6B^zACJN*D;d$k^Nnt9Fx<DS3;HMW;0UQ@bjs9cC-ms7^FR^!h_!>
za9q-sW1yJDx~eYmVt&c2>b~~2ZH$w-f^h%@9^xm=03qv%5>*crPuamll{-$@Zp9p5
zU|lHfdur%zJid6>^f5bonZ3r#Z%BS}A&EcCzwaJ$Eb1SGv<|4=sFUZ#pA1<2jw}zn
z4-hb9yQMnw6)u2ZK{~S<Vl*6O_GzhXK<O#aUQ9|y{7K*!Q!m+_gs_q)2;9{@H|6yS
zzDaLFB20qo$NLvFX1ulVF3^zjQNSA<xIlE|36m0-&_A5`)?p!DzoU<xkuj~JDG45R
zmmuZAGgT1ckIREFnkR%}(=G*<C%{Uq&^k?+5LP7p8F%L&2S+Pt-u>l1(iN*hIdJ`j
z<He8l9Am>tCP_LLkAG(>0w%M?1jsj^V|;P>k*45R>{m95MRo`GGv7kQ&Nqh<XvHsN
zCI}Uf2^%qyFBQh96aOb%M@VV@2*m2obP2tjVgaJ?+$wn2s3W=}p_aC`)~K-MDm#n5
zbMg@Ts89j6pm^WiHNWJ6<!GPz^s6MhuYu)f(obQg;!0{}%_E_lQfR~0gY7znc};Rq
z0SgJuv5x`xkQzCoi{4z8va|7@z*M36*BCkST&U0<SJ%LED^NuUH++E*KQRHyv%#E@
zhLZyx443piYO*#(k(9%-^aom*Il9HxQq`&CVKcF@3s!kFzQ8w#=asxvbMA$NAS9&`
z$oKk~*aeJ8FJV}0G;hcq5nN{XdO6MfvKuCh@jb9F(ovQmzbGTjB^+mE;1j>8qs$be
zLU6cP6LfFSTJXVDb_a3ku^}uOr{@mKE?Xc3YIZDhU^EK-54uGd$UBgcO<xL>_V^d@
z(DG0%Qx+S2BI*wuin?i)b)rB)NEt;gQe$yhGycM^ZN`w<g&0&btbuk*#ok{e77GmN
z?GaFsHGsp0MRn=9WK)!Npi@sf^0=dFGY9dMemEM!@BLEck-^c)U4<~?+^sd*yjiD_
z1mXDd@VE^B&EzJkC{A)RTu~-3w}dCVXTO<W=|my-2yo-l=cYeW$dp)#fh67xhYrjH
zq)pGls5jj|v@4X~dQhL^-f+T!-O8QfdXzUnQJmUDL1iUcare0=;M0l907tx&`|XT#
zFFWKFdZq!OpkP6k7iLiiCxGY(v>dQ*sx6sv+=4|^4N`}1VLFjs=8As9+~n#9V<bAC
zfL%^siHi+_@9X|6zE~Dr8PsmLohY9CF`r(5S|wXIYs94%>ha-++LI<PIB6v{;jYDq
zh{KJgX18@ufM`q(uy=FI*Y`nPo4Crm3O+H>%0)P5lj^DnuJq%5li#xU-%3<Lu(5hM
zecCgqhV)3P<XkvE3@%zBIS4!ahG(D%&?wmeyp<YUfG|kj-=g1Kdr{Bk{IW``MXl#l
z48j<*a1NPPA3|aS46Lui-TzP(QJs1vITOkgr{Am}5tjxGsPeCL(t~=30S9)mAMN0)
zikW^5P4mXP`yw?IVss+6$sfF&!PJymNv>Hh@N8l<tDL2cD(z;J;Ga1=?xr>cV(bGv
zzTX+6#mQ^A+zDLlXTfhdKNYE>6*0}=F%sQTm-mVq!feRmezhhOL=gE(beIk@v<0q5
zP8rWXh0W#0!0@^}(q+MEWmZV6sp9&BDA-pRRhT}4{zACk9c(W`$v<=Riy`>GMl1XY
z>mWD(;(;lz{i1_Oq#2y;%JX5ac4%}3=$2!I`r#N+r*he-u2}xivnE7CQ@uM3y_*_!
zAK^e^?Pjd6r;IE;J_P~EcFE(XiLQuu8@r14bvG!Z2U_wXR$@ac%FICw5Y8nRB72}H
z8ZevjFYaicd#XOBDlO@$WowrqKqcmau=MUzE^d~ese-R5X@l;PI;(^m`>z;Fdl@8=
zF5w49TbgDdpx2fV8ym3X{xaN!h06(3%;{%dkmU?3T!Bv+0v{qfG^Au`Dd{(jpZ5<c
zPRk+81ortvQ~@Z_nL!vFeF$$FLuEjEY_eosQXLGka&$8{4;*?}gO(s5&<{vB@g^k?
z9<RUxH15~!CAT4h)QoVg+2JGEE8wI=Sv(<PZ$~FzE)ICZP|bS5BTd=K(o<Ko@UfY?
z7rLP+Xf7|S0f%z+tC2905{S03s$FA@in=);z;kIt7&a6|MuNeD!-kgkGi~&lEqA1#
zCXJRjFK>)3W>6TTDBRbhh)}E0$SR+53O#aje1j3g<cMS(5kHYm1<gRtfV5CU8nQbx
zO9Q0yw3QAkl5(q<kx{(CRv3Q%RuY`Q`8SIB1%&OF9-7E}%;Q*y9!Is+Yjg+xk&LYE
zK-)+yWPDKG_QcC{GZ6qmTKEaf2mA0%=yc=u8CV-y6{kFfoJ5lm=ONn(m@vps80a}<
z(S(Zq%iZz-C|EFp`4cC$0~bB@sC0vHHDms8(zq=glSp~gcU&*5p*?oBIO^aAX3PUt
z$)=&XTtSA>s#KcB{%4-n10xWF`)mmbU`v8h(=)yWbSbqQoso{H!HM@B2DFR#%Fg<&
zXP{8>0nM0COlfMh!gi3kr{2*>M2!e|F_aw|<+-XrFUZW_jEo9`f72W+k>blS)9OzG
zD}PP{e-dTV$WbOOkzOUa^(EokB^Asm2R-5jQ!2usx1&Nu+gy#3uxh~|cV1Wt1Jm;n
zgb6uyxAk<R6sl`B-Kmb%c?rjY*1XhvMv{4x3RN4C#6|OIU>$+2ogU7tjG)**OC-Um
zyAOzwL#kb6gC>RhO#CRs(zd~fXO?$g*~^5Tmkhuf1a-M-6h873Owy%k5guBofK8!8
ze=WYSLv~Qe!6a7VVK!O~oS!1Ifle}YMvAnlC45#gT&2@0xR_UHh0DZc1B<B#f!c`h
zSd*WsI>_8S#Seu|8Icd!o2*%&?%Z+f$ZV-`N7yK7M!yo`e(|lrgoQ;PSRt9SV-mJg
zkiwY0YluU(6<V~a(n8EZ_xOPJp-1Ms?Ar$Hx-q2^H$5`{F!42bQ=Rxy1twY@DdqAB
zcF@e6PH+w`d(>-G2Y9zi&>a)%El$}%8uSU9tM`vcbAl9ve<2DhYpd_Epl9?bwQon6
zRhaJP!P3s7iHB!6gmkgpD5p_$d-<|{I(R6Fy&_+V;RFZpIIibQHeG2fMp^qQ#GNqj
zdzvXdFYZ1*PN(eeOKkGo0^iz2)!d;I3N2$^9sAhB((px@S>vT24+&0?*5Lh{aVxmg
zG}s_yO{@BnL|F?%Jmi^%vBJ2jW;5@cyjG!GmO?2bmFxIN<bM(xV=RTLKoUf7a)3~^
z8jK3Ax|tm;R%CSo*H+*C{+e=>7vCGayIB8%DsH_==fqRP1IG>*0g|;TP<6pH+|M2E
zF))M8dg+u0Ovy1fqjW{!Qk!9=ZRWo%K`X|gl2`3fXi{fUa0oCbp@H#ezhwU4n9$ya
zeE`p43n!m9MKX278^@6_Wj}$)VRCSk#Ii+)%qN>iv(C$+6;CG7vVSF2Cs7X><wA?g
z|B52F6@Y*MYm2OB2WBu^GC-XlK9hTl;fJfEYkjOjd#MX2(JT5#gla)$k&}c<T<W@s
z*k!R+S%5f|LE1!Mi?;KpT0!_6vnXn37I{1qJISN5!$b#V%J;6K(P)7J)<hBfjhjsF
zV-;y3acf0*oa6`e<i@_(dn<61Yo11M=EoqVBhr(x5PEOkfnRwP*wxPt+&tOE$2>y+
zZkn5PL+<EgouG_usM7eij_)hMq;PEJDB;46C2a33xz+b^H6GM+o-98ft3>-Er_yE{
z^M_?H6HW;1&yWt3XIDojX-0Wec*Br0g_!SVjdmr+FEkJENEV-be_g(1IxhA9vb^KB
zka;Cz+(S!~2ZBOF@w$`{h~<Y=aO!wnu%hEHx-ROd7ICnrMxE=JN*E5sm!DFFCsQGH
zA}=$n5V5D7D?axXs<})lQ=xqxr5?5NT%};HCdq}hb5Y^g;qN}7RQo@I48RI|+2%r#
zH>B4hs@9>_Ic}2>Zxzh@XM5ub@9I|DKt#L5K_no5D>w-4x;Ru}B=4SYrl8U<2w~_D
zV4Q)qWY`lJaJbR^^Zk)QX55MF&Ri7)mrWTO6Zn^FrD>}HrYXu)C3ood$seblDkn@}
zfTqREFmP|IzlZ1VP-z@CDQjM!a4NJ3_$jZOAz!a}G)>vGpKh;8(@2r6b5s=(zAtoX
z&Fj34El_7sw*emqX)=J!6#1Q$0TDyk4ylkw^e__yoQHo5H8;)Vp65-2kXG$nAn@>r
zEOAt<hd=uLoF%*UtPMt!v1but!EBX5`BxN+UYe(v)0hVcUg%s{Ew7XfpnHMA01V9m
zgG!_Pr%Y0*cR4t?ms(7CTuwxA-u0mzGJ`xJ1kfG)S~u70RkXDI00~8^u25<!n$G{N
z_hwQ@Xz-(=Ta;H9Z5m@%1glsy1jc=pz?Cw8xC1R!%ac};UIv%)N9nhIJf{;H5bNvB
zkh8?A$ktXDA*7N${Nt|j;OJoQOsATJD{t-BWD`m$8d7}&O0VrgN^|0ca5SQW3~O+b
zG!RgzocY$Ex8`-K2_JD<DjlEBMh+1*-ua7F&^OlL1zzMOO~+i0kB*+Vz+bw5?G}bm
zKBS`pG%GKhZ`^p=5=NB~Tgr?i+k_T(XD{BX0K_RTYo)tqdvNgf)<8|%Dx*Uce1#C-
zK5)g$tAf`_#{A|_(frjMZN92;vNw2dI*?()iNj32K`e5sxWT0#K)Gx@eHcGcrqu?g
zLW@IX#-uO9EAucy#e$}C60GFSQR*1Y56DYYzJlEUae%_pNBEc!F6YpxNsvH&q{fUu
zj}GA%@EcIa$fp$)yTS$`V17r6=17{VdJR6F<<n+SQaQ}`O6`!CDybzcsAXb;09?}8
z5fS)1Err2u=Q}=i;oO{>X6@VcgRqi#u#GWh+LP8eY;DIwi}{4A&A94XY~&q*02AQ;
zICe8RtGSbXFNw)1@50%fxR{pAuYA3`iC~Xaf!d>?a=3!#>IFIUzWX@4iqoqtp$LaI
zs(41`8)+bC^R?gnPv(2p`P<RD9hfe7luGN&ANTiFYQYbH*q~--De!U8v4WGJhoL~P
z9L*K+V3>9{HX}I>Vk$Zq0@B;xi@Pdx+F3Eu`_5+t(XOzeb5t%d<tcA`?*r!f6NBEF
zvq61_o=2xfV;X=lDy(hnQZ_fRylD%g?TAsLqJ5h>P1$O;GT~-Wbv~_Z1+=c)BY|<f
z5f6n#gPe8M;3b@e@x~QL(hd8ew&gVm_OoABM=ZrO8at3I2wS@(UV$07Z#~REHJ>gS
z_-lH00VFS?<Dl4*esETQ99b;(j)M+t(eK;mQ#%#6c4i@ap4>HXL?vt-Qx1~hq|(Ko
zyr}%{QLl!!Ek>|Bt5~R!6-cJJTs2wxV?!fMsmn!GeinA21Pa|F6Q4IHerqPKCZy^&
zo-3~VEQiJS8MaRp_75ugl6?OxgfFB`qi{b!XZBhvXDLPNq3I9pl)0&h>k0JM>OMD(
zMu0*B?pRZUF|(z?{v>Kvy*#!4K}yE&kiqXGumg-I1A-;PxK)@a4ye5ry$WA4D_1m@
z6SU?VFOeCH;X?FlM3jQqy3$?~&2ylxh-W>uL4bTcQT{e(SW3<#usiSt?{&Hb{p<H0
zyb8$G6PoYFX8<u#+JR&v7f6nMBi6~+;nU5(i=!0Ue}Rm{U+owj@fXo3bazMES4WEe
z?98J1jC_5}ico!2^gv%D<jYG`JU<a|_Snw5=>fk8aO$*33O_Hl<PQ~m=F}V2ExA~{
zpWZcHKz)2?pU&bh;-hH(qtuV6>Q5DuM_pu3pB8Zd6^U|+8BOVLg9t1SBaut=K71Av
zkLdIWQ%`SZq38~Uz?XG>^UwX7S)b2yv<&bH?YrF26)<DREnqsvKHRAmm+RVE(13Iq
z&z#DRCNVSkt0n_a8SQr7TORMvpfvO;f6m3952G4-6s6ji@c|tooz1#YC$$@(Xb1mh
zEVKzf6+4W}(hSpeZM9$H^~-FlJNnJfMQ4MO#9jmbE}De6Yy3>9v8pDavlXf-t|Uaz
z{%PH=StGe;dIWr<@O8c?2vc+P#k8juord$gRY_s|a%8FUT<rF9<ij~_npum|(UEUr
z)5*v%x%YM|8}G%LjGTV{E#4IhV8dkKD5Xu+XS`p{_#3gPD~8O$buIPv_)_KV^1AzQ
zQT2Xxw>MA@sH#)1+gt4?8yWBpDc?yghI0-0I|MBu%mnP^&KBy6R2EPhIG5ohy5NB%
zi%e{OyvAfaj8qL-8XHTLAS^pZrM+g*;G@sFa&g90bnRkY3B<j&0_$=nWSs=(h!N%@
zyncGRmX;5rp`Gt}YcD-L_hQK?CrW;U;<(b<M7BNA4sc-B(ClH*gZ=)v$5~}4sC?<_
zpe@T;qY4!Ji3uiP_Sos_EYK7i>E#FZKIVS=8IvgUIvRzgK|qa`<KTJ|Ep+ZIBy^8#
zmh`(0X2tbL9M#D|xRQ#{?|zIH{56n%j$QuA-R%fpWFAIuaU12{UgoT^KY`qMbn3ar
zO;#pwx+)TULh-f=Z@r{<puox0ecR-X*%VyVbQI_wTp8dp+gcyYTzu<Z<L>@U&lCKB
z2Swq`WuufJ>k}?k7M9#=3NIKqWB`%J#;nH^pRhnwHu95{R)GaXB1nTHlsjk8`h{9f
z9Yti0nrXz2?Pt_8I<HJyc$dR=iMSyLw_1ouEA!NWX)W&|a>LNo;royK50pw6EEE|e
zx)b2DvdxFVSgelTI1$)}H^lzm?b24~*s`ZNnv9v#eks~cxWQ#TM_~??7?H@r{V+#I
zRzm&gXKBo_e#=(ku+E|sl-i#}q7|CZTk6fvZjXa$SO@7vMSs1L>=>$Fo8EbP_RFO`
zox-fg`gjtbKdu%zZk<5lOH>QGD%8v5OKgFP%NqI+6LhSDr?}CM0!0N%O|nVi2xA*A
z&1HL(iRkY4j(3nLgle%czM0dQCe=F06T$jkZ$k#7>h$aD8`uy(1@TX#7gedncP*Dm
zms#o9e|Bpf#!n}yo`WvaSiPy*r5_RGy(d5+UD&PtJw{=q9&S!26xg40$Od)?R)qjJ
z_!aLXYvwP;MpLw4@F^lAPWv#Ix;HylUUvz7zV3Ir1Ov78`NzDuiA)mmgG%o7D3(AP
z>};yq_IKXxG%@d<m&eu@?hfhjO<{KC@%bmDucGwQxR?{iC&cp9tWy6ve%7km5##}K
z6m)#ue1o3}l6)tK^6G_l-+4_#;!}Ve>q)gTfp2DuLBS_!<6&8!PwNq_Rbpm&qK!|o
zhk<s|RGQNNss>rmiyB)nb0}2EDIQfQOI#Q*qA2{zoj=){CPgkLKOU~mWx$JYg9R*Z
z@^H^6rPDAqtCZX38!cvNrIVF4ey?>*&#k`D6?&C~+vrC2(a190LOleuDH8)Jm4qsS
z2e_Zq7g$GIW7sIj>**knD!9WCC<x;96X|275#$K&o74yU$p=~T=v2N=GIZ}gxUu)k
zvb*=jn9%Kj4S)Ik34uRB3C%o;IW<Q>;DVAulsNT~y(Pf-b%Ro*Vf2()nRt?_Rfrb0
zoEl^kgjHwOLN4=F9NC>ciwl7a>W>s}I8zelRE%5epm>Jb1A^@YF$#hknO`DXkxbh;
zKG+(pnWCG9WY5Rrx9<%|_G--LnoUxgTi~W!wuZWEm9WWBrW7~|EJn2?wu&6h9B7Mb
z(3F-%jEfsSiwq-H<UeDKjeJN&Ux~F(O=BKJ#rZRl^1K%txW1M*njV17j$pgrItG~H
zc^I_obqS(>!x4YXH~v6gkRnQ9hhExhB4$rU7<D@n{qp9?gz6@7r)YBsI3u;>CJ*bU
zNVzwe=tXV5QB!ap1N}ENnG_l!zOaT!dGQk4%s)4Cdqh@etpQyk5?5*zogmBuWh#A;
zR;;{M+sSu9r&`)cV5$UkY;Hny!T!x)@u=-AAx-#PLO<q(Hl38uwh!+uPQx|B*vT1@
zyvZ0{Y8`;v#<s*9@6noij7B3Z3N$jFhUS`!<ruevBG56db-(<!j`HEIRLHDUzD$TX
zPA9!r>;e<--qL?oBP7MiINAWGuja%NNPaSVfgtd}h|WC5m%#+KKmMi@zl%8~wBFA2
z4tf0*QHN0L-WKs2jds>pg+mq`<aoda+Ic$xk9x>iXiB4QP!hMA%*mw((fT}<g!^CI
z5Ilq6Qfl1DEeqLFGmEug;YTo`y6KcBJc#PpLF1Sh?N^o!yEgcJog^>2ul_CTWSBK;
zYLWJupsjz4_Z2GeZfD|E@Hty>!kwMhJ9?_VpDChk_Y!eFX6K?cP<iv@Tp&kXq24Cr
z(rLc`A!IhQydQ75tx#3L{0z&zMCl622U>#L?eabTS%2!UqnqMcJct?i4L4osRLcFg
zlGWCbhJ>7qIXXSI!Kn+-O&M)$LOj;op8Yx4ybsu~f93-W{yRq0Sdn5xlUnvD_of%Q
zuvlnYK(%oAIRElU{}vYqlHRo{{IgrI#XSMZyNbLSUe?_hyx^3_Td-EjRUI0wsOMmM
zR2A5D`zyubgRY!`YZj|6ThIVK&G#=hBzA0zH6-$E@y^{FPTkX<N@OIlvBSZKQ5{uD
z8;_kARzDAL{u%jqorzv2y#DptVm?yobJHTu9RD!~s#_!Ov~o<QKlx|~d96kDue(^7
z^LjttAe`57ug-G$eF4DY=!A_R3xqC)J}8Tdf|}&lI&-Sv*yZHx?CK?H<+aBBja8=s
z$H3_AXp2QGGT*E}hvUwG&`pF|W!t;=${<ZCk9Qxi{rU}KTQ4{MuUf@<)gA<<*C_{l
zK*bolWY*E`0Kxry@0VM=H%u54%SU?ujma0G9Tk#Y5xSVL)Ewt5$&0~6oo5Z<7|OMh
z?`qrZWqF-9U|oAvrXc)!*2o$$t@1r)%nV3dPJ&Ty>S@_QeSJqDo177h`UMv8d$U|O
z@yj3B2eKOF5LEE-pgVrs`LZlK!qTybkpYGan4L%JIYNkL_CxgTI=;^ijKm|Sk=)tI
z97;$dW8mx2a1+NnGFVRBAB~%RuEW8#)awQ1%LoL4p#7W8fujzX3|xj7&gTFb4Eb~Z
zz+1|qSMfD{M3AcJ&_faDC<PCuDt9nPPEZW=2PlDM;BKzw(Ie$p+-))i=853w&77o>
z(D}<SPS@^`!LPSO_N-Nd0!G_b;RyTiJ2RJ-OCy0GAn_NRZ5s#eE;u$-D#35?7qfpm
z8`Pjauc|fS8k;CF0^c_cdK_4*5<@grDH0cCpz(!^7ou^j0K5C{O|5N4?@gp@y(GYN
zW&tkc2fMwnUVc%3na{B6&;#3Qd#E6Q*6uLF2oHaa(dT@4O%HzCiiQBYq{6Bt@@8%;
zH49fvwUobUhH${Sf2+IMs;NuL<4ztdi>_?6ys&U6edf<W^NF0*1Z5X#Eq>Q=w!=~U
za)?8fWvru0oA~B2f4kd6oI#sYba9}fl4u*=@i_>zaohp|H)Z1BjIbON-O2-AM7(ol
zA|Bw?|CKcf0WhxdD;E?#OdJ;sHOWF|<NW!0!s1tJw~k8~mpW1Z+u1t?w--fFh3OjA
zIkHYj+*FSDc1L}<=TaY;i#xKoymXo<*2&Ctj-SWv-7Z$W2iE59^y{%*bu{qzCTsT3
zNu@f0^%?gM7FNzkI<|{h45mdw_YQFW_;ovbRUMVbAG?62D^J_Sgl7fBE^8!4K_9eG
zA}hoffhf7@nDLB=mBA}_RSuP2yEZAaLg}~&*XHgmRVW`i#j0vo($6z8MFn#;+IS+4
zdi%-}i?42*nzXl`qBWc<q(z`zwI;L?e1(S7@I-qa(;Z=Pl@Lw{$%O_5u{%a8molPt
zTcP2dnEbDYw?(>$U$N)IdWuo=^W?db+dZDu_!(-U#1TZoC?l0)x^u^Gdag64rTG>@
zWBUZu+s-fJi(1sze3UNPEj%1gA4AXjBV;=k{^zWy2!P64$JAE>UQgimT&Mcn^7_+f
zWlFGm;-QQ;nL1fhh(g7&2N9;#G34p);?9DuVTJKw#Xx~d-?@SS4`u8Yd68VqpNg9@
z7QD9DOU`}=kEpQ*p2?7|Q`F1S$!{y}qPyoK-R<}`KbM53vi!lDmLr*l@*=UB0NpTQ
zchZBG(K_eZ{@CF^iMGJ`$;xMj&3-+xt@Mni>aXRk^rB~Vr@F$>02?(s4Gf&~#E(Bv
z0)Gg4)1c}zFtXw&elYMj@?Plod556W7_V?*-<e!3@DqtE%2>OMjwY_F`P{JUaLBUK
z>UN%9Pgk$GKBptnoojF0<oy-;HJ6v?NRL2cA>#c7`0r6Q)PIPkvnb^WuNw4Fr+kr0
z-%)av2?CPvt#k?Yp9Gf{?f)biEVK}zzkf)bPw+INfeJ(JQ~HjEGNUhiXio4&kutkq
zdkM8bN+_wIex>h7*aXrbvrxhQpP~47zm%a&5R(6C*z&y+EC>RiZeE1Q-bAC30RXU0
zfyf5o1j|M0O99CN!6)|l7CgK4kFI>o1pp>@LBA=m*x%$>0087WE(2g0Q@pZ4$iOPh
z`~SQA|K`m8pYp;0wds_NY!K4_&}IL-{QsWP|L=0}^%Q{|5Q6{A>;J3#KX|10<bdFT
z&b+3i{U`H{)_-LLzcV791&9A0z5(sU0RYhd*ES0S5Z?j+o4M=iU}ETE%IM&!rV0W0
ze|wO811n-NP)K-)4*)c10RU+KpB{=}ASsHuAUI&(nEz)@OhnlK0^)y=Pl?S1!2}B=
z21zN)1;GRR#{a)|rv0CtiJ6slUwv1XexnBWzq(!qToQu-^!05GE$#I6|JR28|3qR%
WSqRAgtOoP_@PD(5iAn#1|NjHRU0+`S

diff --git a/youtube-dl.1 b/youtube-dl.1
index 28e8311..8090af4 100644
--- a/youtube-dl.1
+++ b/youtube-dl.1
@@ -1,7 +1,7 @@
 .TH YOUTUBE\-DL 1 "" 
 .SH NAME
 .PP
-youtube\-dl
+youtube\-dl \- download videos from youtube.com or other video platforms
 .SH SYNOPSIS
 .PP
 \f[B]youtube\-dl\f[] OPTIONS (#options) URL [URL...]
@@ -141,7 +141,7 @@ redistribute it or use it however you like.
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ video\ (currently\ youtube\ only)
 \-\-list\-subs\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ lists\ all\ available\ subtitles\ for\ the\ video
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (currently\ youtube\ only)
-\-\-sub\-format\ LANG\ \ \ \ \ \ \ \ \ \ subtitle\ format\ [srt/sbv]\ (default=srt)
+\-\-sub\-format\ FORMAT\ \ \ \ \ \ \ \ subtitle\ format\ [srt/sbv]\ (default=srt)
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (currently\ youtube\ only)
 \-\-sub\-lang\ LANG\ \ \ \ \ \ \ \ \ \ \ \ language\ of\ the\ subtitles\ to\ download\ (optional)
 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ use\ IETF\ language\ tags\ like\ \[aq]en\[aq]
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index f4ce480..f21c66c 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -1,8 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
 import math
 import io
 import os
@@ -18,7 +13,7 @@ if os.name == 'nt':
     import ctypes
 
 from .utils import *
-from .InfoExtractors import get_info_extractor
+from .extractor import get_info_extractor
 
 
 class FileDownloader(object):
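
As a minimal sketch of the layout the hunk above points to (the old InfoExtractors import is replaced by the new youtube_dl.extractor package), the relocated helper can be used roughly as follows. The 'Youtube' extractor name, the lookup-by-name behaviour of get_info_extractor(), and the argument-free construction are assumptions for illustration only; they are not taken from this patch.

    # Sketch (assumed API): get_info_extractor() is taken here to look up an
    # extractor class by its IE name in the new youtube_dl.extractor package.
    from youtube_dl.extractor import get_info_extractor

    YoutubeIE = get_info_extractor('Youtube')  # hypothetical lookup by IE name
    ie = YoutubeIE()                           # the downloader argument is optional
    print(ie.IE_NAME)                          # IE_NAME is derived from the class name
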
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 17e0f83..672ef9e 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1,4615 +1,4 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# Legacy file for backwards compatibility, use youtube_dl.extractor instead!
 
-from __future__ import absolute_import
-
-import base64
-import datetime
-import itertools
-import netrc
-import os
-import re
-import socket
-import time
-import email.utils
-import xml.etree.ElementTree
-import random
-import math
-import operator
-import hashlib
-import binascii
-import urllib
-
-from .utils import *
-
-
-class InfoExtractor(object):
-    """Information Extractor class.
-
-    Information extractors are the classes that, given a URL, extract
-    information about the video (or videos) the URL refers to. This
-    information includes the real video URL, the video title, author and
-    others. The information is stored in a dictionary which is then
-    passed to the FileDownloader. The FileDownloader processes this
-    information possibly downloading the video to the file system, among
-    other possible outcomes.
-
-    The dictionaries must include the following fields:
-
-    id:             Video identifier.
-    url:            Final video URL.
-    title:          Video title, unescaped.
-    ext:            Video filename extension.
-
-    The following fields are optional:
-
-    format:         The video format, defaults to ext (used for --get-format)
-    thumbnail:      Full URL to a video thumbnail image.
-    description:    One-line video description.
-    uploader:       Full name of the video uploader.
-    upload_date:    Video upload date (YYYYMMDD).
-    uploader_id:    Nickname or id of the video uploader.
-    location:       Physical location of the video.
-    player_url:     SWF Player URL (used for rtmpdump).
-    subtitles:      The subtitle file contents.
-    urlhandle:      [internal] The urlHandle to be used to download the file,
-                    like returned by urllib.request.urlopen
-
-    The fields should all be Unicode strings.
-
-    Subclasses of this one should re-define the _real_initialize() and
-    _real_extract() methods and define a _VALID_URL regexp.
-    Probably, they should also be added to the list of extractors.
-
-    _real_extract() must return a *list* of information dictionaries as
-    described above.
-
-    Finally, the _WORKING attribute should be set to False for broken IEs
-    in order to warn the users and skip the tests.
-    """
-
-    _ready = False
-    _downloader = None
-    _WORKING = True
-
-    def __init__(self, downloader=None):
-        """Constructor. Receives an optional downloader."""
-        self._ready = False
-        self.set_downloader(downloader)
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url) is not None
-
-    @classmethod
-    def working(cls):
-        """Getter method for _WORKING."""
-        return cls._WORKING
-
-    def initialize(self):
-        """Initializes an instance (authentication, etc)."""
-        if not self._ready:
-            self._real_initialize()
-            self._ready = True
-
-    def extract(self, url):
-        """Extracts URL information and returns it in list of dicts."""
-        self.initialize()
-        return self._real_extract(url)
-
-    def set_downloader(self, downloader):
-        """Sets the downloader for this IE."""
-        self._downloader = downloader
-
-    def _real_initialize(self):
-        """Real initialization process. Redefine in subclasses."""
-        pass
-
-    def _real_extract(self, url):
-        """Real extraction process. Redefine in subclasses."""
-        pass
-
-    @property
-    def IE_NAME(self):
-        return type(self).__name__[:-2]
-
-    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns the response handle """
-        if note is None:
-            self.report_download_webpage(video_id)
-        elif note is not False:
-            self.to_screen(u'%s: %s' % (video_id, note))
-        try:
-            return compat_urllib_request.urlopen(url_or_request)
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            if errnote is None:
-                errnote = u'Unable to download webpage'
-            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
-
-    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns a tuple (page content as string, URL handle) """
-        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
-        content_type = urlh.headers.get('Content-Type', '')
-        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
-        if m:
-            encoding = m.group(1)
-        else:
-            encoding = 'utf-8'
-        webpage_bytes = urlh.read()
-        if self._downloader.params.get('dump_intermediate_pages', False):
-            try:
-                url = url_or_request.get_full_url()
-            except AttributeError:
-                url = url_or_request
-            self.to_screen(u'Dumping request to ' + url)
-            dump = base64.b64encode(webpage_bytes).decode('ascii')
-            self._downloader.to_screen(dump)
-        content = webpage_bytes.decode(encoding, 'replace')
-        return (content, urlh)
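-    # Illustrative example of the charset sniffing above (headers are
-    # hypothetical):
-    #
-    #   Content-Type: text/html; charset=iso-8859-1  -> decode as iso-8859-1
-    #   Content-Type: application/json               -> fall back to utf-8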
-
-    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
-        """ Returns the data of the page as a string """
-        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
-
-    def to_screen(self, msg):
-        """Print msg to screen, prefixing it with '[ie_name]'"""
-        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
-
-    def report_extraction(self, id_or_name):
-        """Report information extraction."""
-        self.to_screen(u'%s: Extracting information' % id_or_name)
-
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
-    def report_age_confirmation(self):
-        """Report attempt to confirm age."""
-        self.to_screen(u'Confirming age')
-
-    # Methods for following #608
-    # They set the correct value of the '_type' key
-    def video_result(self, video_info):
-        """Returns a video"""
-        video_info['_type'] = 'video'
-        return video_info
-    def url_result(self, url, ie=None):
-        """Returns a url that points to a page that should be processed"""
-        #TODO: ie should be the class used for getting the info
-        video_info = {'_type': 'url',
-                      'url': url,
-                      'ie_key': ie}
-        return video_info
-    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
-        """Returns a playlist"""
-        video_info = {'_type': 'playlist',
-                      'entries': entries}
-        if playlist_id:
-            video_info['id'] = playlist_id
-        if playlist_title:
-            video_info['title'] = playlist_title
-        return video_info
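-    # Illustrative sketch of how a subclass might use the helpers above
-    # (URLs and ids below are hypothetical):
-    #
-    #   entries = [self.url_result(u'http://example.com/watch?v=%s' % vid)
-    #              for vid in (u'a1', u'b2')]
-    #   return self.playlist_result(entries, playlist_id=u'pl42',
-    #                               playlist_title=u'Example playlist')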
-
-    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
-        """
-        Perform a regex search on the given string, using a single pattern or
-        a list of patterns, and return the first matching group.
-        In case of failure, return a default value, emit a warning or raise an
-        ExtractorError (depending on fatal), specifying the field name.
-        """
-        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
-            mobj = re.search(pattern, string, flags)
-        else:
-            for p in pattern:
-                mobj = re.search(p, string, flags)
-                if mobj: break
-
-        if sys.stderr.isatty() and os.name != 'nt':
-            _name = u'\033[0;34m%s\033[0m' % name
-        else:
-            _name = name
-
-        if mobj:
-            # return the first matching group
-            return next(g for g in mobj.groups() if g is not None)
-        elif default is not None:
-            return default
-        elif fatal:
-            raise ExtractorError(u'Unable to extract %s' % _name)
-        else:
-            self._downloader.report_warning(u'unable to extract %s; '
-                u'please report this issue on GitHub.' % _name)
-            return None
-
-    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
-        """
-        Like _search_regex, but strips HTML tags and unescapes entities.
-        """
-        res = self._search_regex(pattern, string, name, default, fatal, flags)
-        if res:
-            return clean_html(res).strip()
-        else:
-            return res
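-    # Illustrative sketch of typical usage from a subclass (the patterns and
-    # field names below are hypothetical):
-    #
-    #   title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, u'title')
-    #   views = self._search_regex(r'(\d+) views', webpage, u'view count',
-    #                              fatal=False)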
-
-class SearchInfoExtractor(InfoExtractor):
-    """
-    Base class for paged search queries extractors.
-    They accept urls in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
-    Instances should define _SEARCH_KEY and _MAX_RESULTS.
-    """
-
-    @classmethod
-    def _make_valid_url(cls):
-        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
-
-    @classmethod
-    def suitable(cls, url):
-        return re.match(cls._make_valid_url(), url) is not None
-
-    def _real_extract(self, query):
-        mobj = re.match(self._make_valid_url(), query)
-        if mobj is None:
-            raise ExtractorError(u'Invalid search query "%s"' % query)
-
-        prefix = mobj.group('prefix')
-        query = mobj.group('query')
-        if prefix == '':
-            return self._get_n_results(query, 1)
-        elif prefix == 'all':
-            return self._get_n_results(query, self._MAX_RESULTS)
-        else:
-            n = int(prefix)
-            if n <= 0:
-                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
-            elif n > self._MAX_RESULTS:
-                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
-                n = self._MAX_RESULTS
-            return self._get_n_results(query, n)
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-        raise NotImplementedError("This method must be implemented by sublclasses")
-
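-# Illustrative sketch of the query strings a SearchInfoExtractor subclass
-# accepts, assuming a subclass with _SEARCH_KEY = 'examplesearch' and
-# _MAX_RESULTS = 1000 (the key and limit are hypothetical):
-#
-#   'examplesearch:some query'      -> first result only
-#   'examplesearch5:some query'     -> first five results
-#   'examplesearchall:some query'   -> up to _MAX_RESULTS results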
-
-class YoutubeIE(InfoExtractor):
-    """Information extractor for youtube.com."""
-
-    _VALID_URL = r"""^
-                     (
-                         (?:https?://)?                                       # http(s):// (optional)
-                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
-                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
-                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
-                         (?:                                                  # the various things that can precede the ID:
-                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
-                             |(?:                                             # or the v= param in all its forms
-                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
-                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
-                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
-                                 v=
-                             )
-                         )?                                                   # optional -> youtube.com/xxxx is OK
-                     )?                                                       # all until now is optional -> you can pass the naked ID
-                     ([0-9A-Za-z_-]+)                                         # here is it! the YouTube video ID
-                     (?(1).+)?                                                # if we found the ID, everything can follow
-                     $"""
-    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
-    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
-    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
-    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
-    _NETRC_MACHINE = 'youtube'
-    # Listed in order of quality
-    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
-    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
-    _video_extensions = {
-        '13': '3gp',
-        '17': 'mp4',
-        '18': 'mp4',
-        '22': 'mp4',
-        '37': 'mp4',
-        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
-        '43': 'webm',
-        '44': 'webm',
-        '45': 'webm',
-        '46': 'webm',
-    }
-    _video_dimensions = {
-        '5': '240x400',
-        '6': '???',
-        '13': '???',
-        '17': '144x176',
-        '18': '360x640',
-        '22': '720x1280',
-        '34': '360x640',
-        '35': '480x854',
-        '37': '1080x1920',
-        '38': '3072x4096',
-        '43': '360x640',
-        '44': '480x854',
-        '45': '720x1280',
-        '46': '1080x1920',
-    }
-    IE_NAME = u'youtube'
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        if YoutubePlaylistIE.suitable(url): return False
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def report_lang(self):
-        """Report attempt to set language."""
-        self.to_screen(u'Setting language')
-
-    def report_login(self):
-        """Report attempt to log in."""
-        self.to_screen(u'Logging in')
-
-    def report_video_webpage_download(self, video_id):
-        """Report attempt to download video webpage."""
-        self.to_screen(u'%s: Downloading video webpage' % video_id)
-
-    def report_video_info_webpage_download(self, video_id):
-        """Report attempt to download video info webpage."""
-        self.to_screen(u'%s: Downloading video info webpage' % video_id)
-
-    def report_video_subtitles_download(self, video_id):
-        """Report attempt to check for available subtitles."""
-        self.to_screen(u'%s: Checking available subtitles' % video_id)
-
-    def report_video_subtitles_request(self, video_id, sub_lang, format):
-        """Report attempt to download video subtitles."""
-        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
-
-    def report_video_subtitles_available(self, video_id, sub_lang_list):
-        """Report available subtitles."""
-        sub_lang = ",".join(list(sub_lang_list.keys()))
-        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
-
-    def report_information_extraction(self, video_id):
-        """Report attempt to extract video information."""
-        self.to_screen(u'%s: Extracting video information' % video_id)
-
-    def report_unavailable_format(self, video_id, format):
-        """Report extracted video URL."""
-        self.to_screen(u'%s: Format %s not available' % (video_id, format))
-
-    def report_rtmp_download(self):
-        """Indicate the download will use the RTMP protocol."""
-        self.to_screen(u'RTMP download detected')
-
-    def _get_available_subtitles(self, video_id):
-        self.report_video_subtitles_download(video_id)
-        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-        try:
-            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'unable to download video subtitles: %s' % compat_str(err), None)
-        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
-        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
-        if not sub_lang_list:
-            return (u'video doesn\'t have subtitles', None)
-        return sub_lang_list
-
-    def _list_available_subtitles(self, video_id):
-        sub_lang_list = self._get_available_subtitles(video_id)
-        self.report_video_subtitles_available(video_id, sub_lang_list)
-
-    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
-        """
-        Return tuple:
-        (error_message, sub_lang, sub)
-        """
-        self.report_video_subtitles_request(video_id, sub_lang, format)
-        params = compat_urllib_parse.urlencode({
-            'lang': sub_lang,
-            'name': sub_name,
-            'v': video_id,
-            'fmt': format,
-        })
-        url = 'http://www.youtube.com/api/timedtext?' + params
-        try:
-            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
-        if not sub:
-            return (u'Did not fetch video subtitles', None, None)
-        return (None, sub_lang, sub)
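-    # Illustrative sketch of the timedtext URL built above (the video id and
-    # subtitle format are hypothetical):
-    #
-    #   http://www.youtube.com/api/timedtext?lang=en&name=&v=abc123&fmt=srt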
-
-    def _request_automatic_caption(self, video_id, webpage):
-        """We need the webpage for getting the captions url, pass it as an
-           argument to speed up the process."""
-        sub_lang = self._downloader.params.get('subtitleslang')
-        sub_format = self._downloader.params.get('subtitlesformat')
-        self.to_screen(u'%s: Looking for automatic captions' % video_id)
-        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
-        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
-        if mobj is None:
-            return [(err_msg, None, None)]
-        player_config = json.loads(mobj.group(1))
-        try:
-            args = player_config[u'args']
-            caption_url = args[u'ttsurl']
-            timestamp = args[u'timestamp']
-            params = compat_urllib_parse.urlencode({
-                'lang': 'en',
-                'tlang': sub_lang,
-                'fmt': sub_format,
-                'ts': timestamp,
-                'kind': 'asr',
-            })
-            subtitles_url = caption_url + '&' + params
-            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
-            return [(None, sub_lang, sub)]
-        except KeyError:
-            return [(err_msg, None, None)]
-
-    def _extract_subtitle(self, video_id):
-        """
-        Return a list with a tuple:
-        [(error_message, sub_lang, sub)]
-        """
-        sub_lang_list = self._get_available_subtitles(video_id)
-        sub_format = self._downloader.params.get('subtitlesformat')
-        if isinstance(sub_lang_list, tuple):  # There was an error; the available subtitles could not be retrieved
-            return [(sub_lang_list[0], None, None)]
-        if self._downloader.params.get('subtitleslang', False):
-            sub_lang = self._downloader.params.get('subtitleslang')
-        elif 'en' in sub_lang_list:
-            sub_lang = 'en'
-        else:
-            sub_lang = list(sub_lang_list.keys())[0]
-        if sub_lang not in sub_lang_list:
-            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
-
-        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
-        return [subtitle]
-
-    def _extract_all_subtitles(self, video_id):
-        sub_lang_list = self._get_available_subtitles(video_id)
-        sub_format = self._downloader.params.get('subtitlesformat')
-        if isinstance(sub_lang_list, tuple):  # There was an error; the available subtitles could not be retrieved
-            return [(sub_lang_list[0], None, None)]
-        subtitles = []
-        for sub_lang in sub_lang_list:
-            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
-            subtitles.append(subtitle)
-        return subtitles
-
-    def _print_formats(self, formats):
-        print('Available formats:')
-        for x in formats:
-            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
-
-    def _real_initialize(self):
-        if self._downloader is None:
-            return
-
-        username = None
-        password = None
-        downloader_params = self._downloader.params
-
-        # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
-            username = downloader_params['username']
-            password = downloader_params['password']
-        elif downloader_params.get('usenetrc', False):
-            try:
-                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-                if info is not None:
-                    username = info[0]
-                    password = info[2]
-                else:
-                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-            except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-                return
-
-        # Set language
-        request = compat_urllib_request.Request(self._LANG_URL)
-        try:
-            self.report_lang()
-            compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
-            return
-
-        # No authentication to be performed
-        if username is None:
-            return
-
-        request = compat_urllib_request.Request(self._LOGIN_URL)
-        try:
-            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
-            return
-
-        galx = None
-        dsh = None
-        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-          galx = match.group(1)
-
-        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
-        if match:
-          dsh = match.group(1)
-
-        # Log in
-        login_form_strs = {
-                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
-                u'Email': username,
-                u'GALX': galx,
-                u'Passwd': password,
-                u'PersistentCookie': u'yes',
-                u'_utf8': u'霱',
-                u'bgresponse': u'js_disabled',
-                u'checkConnection': u'',
-                u'checkedDomains': u'youtube',
-                u'dnConn': u'',
-                u'dsh': dsh,
-                u'pstMsg': u'0',
-                u'rmShown': u'1',
-                u'secTok': u'',
-                u'signIn': u'Sign in',
-                u'timeStmp': u'',
-                u'service': u'youtube',
-                u'uilel': u'3',
-                u'hl': u'en_US',
-        }
-        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
-        # chokes on unicode
-        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
-        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
-        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
-        try:
-            self.report_login()
-            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
-            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
-                self._downloader.report_warning(u'unable to log in: bad username or password')
-                return
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
-            return
-
-        # Confirm age
-        age_form = {
-                'next_url':     '/',
-                'action_confirm':   'Confirm',
-                }
-        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
-        try:
-            self.report_age_confirmation()
-            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
-    def _extract_id(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(2)
-        return video_id
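-    # Illustrative sketch of URL shapes _VALID_URL/_extract_id accept (the
-    # video id below is hypothetical):
-    #
-    #   http://www.youtube.com/watch?v=abc123def45
-    #   http://youtu.be/abc123def45
-    #   http://www.youtube.com/embed/abc123def45
-    #   abc123def45                    (a naked id is accepted as well)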
-
-    def _real_extract(self, url):
-        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
-        mobj = re.search(self._NEXT_URL_RE, url)
-        if mobj:
-            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
-        video_id = self._extract_id(url)
-
-        # Get video webpage
-        self.report_video_webpage_download(video_id)
-        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
-        request = compat_urllib_request.Request(url)
-        try:
-            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
-
-        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
-
-        # Attempt to extract SWF player URL
-        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
-        if mobj is not None:
-            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
-        else:
-            player_url = None
-
-        # Get video info
-        self.report_video_info_webpage_download(video_id)
-        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                    % (video_id, el_type))
-            video_info_webpage = self._download_webpage(video_info_url, video_id,
-                                    note=False,
-                                    errnote='unable to download video info webpage')
-            video_info = compat_parse_qs(video_info_webpage)
-            if 'token' in video_info:
-                break
-        if 'token' not in video_info:
-            if 'reason' in video_info:
-                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
-            else:
-                raise ExtractorError(u'"token" parameter not in video info for unknown reason')
-
-        # Check for "rental" videos
-        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
-            raise ExtractorError(u'"rental" videos not supported')
-
-        # Start extracting information
-        self.report_information_extraction(video_id)
-
-        # uploader
-        if 'author' not in video_info:
-            raise ExtractorError(u'Unable to extract uploader name')
-        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
-
-        # uploader_id
-        video_uploader_id = None
-        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
-        if mobj is not None:
-            video_uploader_id = mobj.group(1)
-        else:
-            self._downloader.report_warning(u'unable to extract uploader nickname')
-
-        # title
-        if 'title' not in video_info:
-            raise ExtractorError(u'Unable to extract video title')
-        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
-
-        # thumbnail image
-        if 'thumbnail_url' not in video_info:
-            self._downloader.report_warning(u'unable to extract video thumbnail')
-            video_thumbnail = ''
-        else:   # don't panic if we can't find it
-            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
-
-        # upload date
-        upload_date = None
-        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
-        if mobj is not None:
-            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
-            upload_date = unified_strdate(upload_date)
-
-        # description
-        video_description = get_element_by_id("eow-description", video_webpage)
-        if video_description:
-            video_description = clean_html(video_description)
-        else:
-            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
-            if fd_mobj:
-                video_description = unescapeHTML(fd_mobj.group(1))
-            else:
-                video_description = u''
-
-        # subtitles
-        video_subtitles = None
-
-        if self._downloader.params.get('writesubtitles', False):
-            video_subtitles = self._extract_subtitle(video_id)
-            if video_subtitles:
-                (sub_error, sub_lang, sub) = video_subtitles[0]
-                if sub_error:
-                    # We try with the automatic captions
-                    video_subtitles = self._request_automatic_caption(video_id, video_webpage)
-                    (sub_error_auto, sub_lang, sub) = video_subtitles[0]
-                    if sub is None:
-                        # We report the original error
-                        self._downloader.report_error(sub_error)
-
-        if self._downloader.params.get('allsubtitles', False):
-            video_subtitles = self._extract_all_subtitles(video_id)
-            for video_subtitle in video_subtitles:
-                (sub_error, sub_lang, sub) = video_subtitle
-                if sub_error:
-                    self._downloader.report_error(sub_error)
-
-        if self._downloader.params.get('listsubtitles', False):
-            sub_lang_list = self._list_available_subtitles(video_id)
-            return
-
-        if 'length_seconds' not in video_info:
-            self._downloader.report_warning(u'unable to extract video duration')
-            video_duration = ''
-        else:
-            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
-
-        # token
-        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])
-
-        # Decide which formats to download
-        req_format = self._downloader.params.get('format', None)
-
-        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
-            self.report_rtmp_download()
-            video_url_list = [(None, video_info['conn'][0])]
-        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
-            url_map = {}
-            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
-                url_data = compat_parse_qs(url_data_str)
-                if 'itag' in url_data and 'url' in url_data:
-                    url = url_data['url'][0] + '&signature=' + url_data['sig'][0]
-                    if 'ratebypass' not in url:
-                        url += '&ratebypass=yes'
-                    url_map[url_data['itag'][0]] = url
-
-            format_limit = self._downloader.params.get('format_limit', None)
-            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
-            if format_limit is not None and format_limit in available_formats:
-                format_list = available_formats[available_formats.index(format_limit):]
-            else:
-                format_list = available_formats
-            existing_formats = [x for x in format_list if x in url_map]
-            if len(existing_formats) == 0:
-                raise ExtractorError(u'no known formats available for video')
-            if self._downloader.params.get('listformats', None):
-                self._print_formats(existing_formats)
-                return
-            if req_format is None or req_format == 'best':
-                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-            elif req_format == 'worst':
-                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
-            elif req_format in ('-1', 'all'):
-                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-            else:
-                # Specific formats. We pick the first in a slash-delimited sequence.
-                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
-                req_formats = req_format.split('/')
-                video_url_list = None
-                for rf in req_formats:
-                    if rf in url_map:
-                        video_url_list = [(rf, url_map[rf])]
-                        break
-                if video_url_list is None:
-                    raise ExtractorError(u'requested format not available')
-        else:
-            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
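-        # Illustrative sketch of the slash-delimited selection above (the
-        # itags below are hypothetical for this example):
-        #
-        #   req_format = '22/35/18' with url_map containing '35' and '18'
-        #   -> '22' is skipped and '35' is picked; '18' is never considered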
-
-        results = []
-        for format_param, video_real_url in video_url_list:
-            # Extension
-            video_extension = self._video_extensions.get(format_param, 'flv')
-
-            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
-                                              self._video_dimensions.get(format_param, '???'))
-
-            results.append({
-                'id':       video_id,
-                'url':      video_real_url,
-                'uploader': video_uploader,
-                'uploader_id': video_uploader_id,
-                'upload_date':  upload_date,
-                'title':    video_title,
-                'ext':      video_extension,
-                'format':   video_format,
-                'thumbnail':    video_thumbnail,
-                'description':  video_description,
-                'player_url':   player_url,
-                'subtitles':    video_subtitles,
-                'duration':     video_duration
-            })
-        return results
-
-
-class MetacafeIE(InfoExtractor):
-    """Information Extractor for metacafe.com."""
-
-    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
-    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
-    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
-    IE_NAME = u'metacafe'
-
-    def report_disclaimer(self):
-        """Report disclaimer retrieval."""
-        self.to_screen(u'Retrieving disclaimer')
-
-    def _real_initialize(self):
-        # Retrieve disclaimer
-        request = compat_urllib_request.Request(self._DISCLAIMER)
-        try:
-            self.report_disclaimer()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
-
-        # Confirm age
-        disclaimer_form = {
-            'filters': '0',
-            'submit': "Continue - I'm over 18",
-            }
-        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
-        try:
-            self.report_age_confirmation()
-            disclaimer = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
-
-    def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1)
-
-        # Check if video comes from YouTube
-        mobj2 = re.match(r'^yt-(.*)$', video_id)
-        if mobj2 is not None:
-            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
-
-        # Retrieve video webpage to extract further information
-        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
-
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
-        if mobj is not None:
-            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
-            video_extension = mediaURL[-3:]
-
-            # Extract gdaKey if available
-            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
-            if mobj is None:
-                video_url = mediaURL
-            else:
-                gdaKey = mobj.group(1)
-                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
-        else:
-            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            vardict = compat_parse_qs(mobj.group(1))
-            if 'mediaData' not in vardict:
-                raise ExtractorError(u'Unable to extract media URL')
-            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
-            if mobj is None:
-                raise ExtractorError(u'Unable to extract media URL')
-            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
-            video_extension = mediaURL[-3:]
-            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
-
-        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = mobj.group(1).decode('utf-8')
-
-        mobj = re.search(r'submitter=(.*?);', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract uploader nickname')
-        video_uploader = mobj.group(1)
-
-        return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
-            'uploader': video_uploader.decode('utf-8'),
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
-        }]
-
-class DailymotionIE(InfoExtractor):
-    """Information Extractor for Dailymotion"""
-
-    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
-    IE_NAME = u'dailymotion'
-
-    def _real_extract(self, url):
-        # Extract id and simplified title from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1).split('_')[0].split('?')[0]
-
-        video_extension = 'mp4'
-
-        # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url)
-        request.add_header('Cookie', 'family_filter=off')
-        webpage = self._download_webpage(request, video_id)
-
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
-        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract media URL')
-        flashvars = compat_urllib_parse.unquote(mobj.group(1))
-
-        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
-            if key in flashvars:
-                max_quality = key
-                self.to_screen(u'Using %s' % key)
-                break
-        else:
-            raise ExtractorError(u'Unable to extract video URL')
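-        # Illustrative note: with flashvars containing e.g. 'hd720URL' and
-        # 'sdURL', the loop above picks 'hd720URL', the first key of the
-        # preference list that is present.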
-
-        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract video URL')
-
-        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
-
-        # TODO: support choosing qualities
-
-        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = unescapeHTML(mobj.group('title'))
-
-        video_uploader = None
-        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
-                                             # Looking for official user
-                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
-                                            webpage, 'video uploader')
-
-        video_upload_date = None
-        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
-        if mobj is not None:
-            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': video_uploader,
-            'upload_date':  video_upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-        }]
-
-
-class PhotobucketIE(InfoExtractor):
-    """Information extractor for photobucket.com."""
-
-    # TODO: the original _VALID_URL was:
-    # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
-    # Check if it's necessary to keep the old extraction process
-    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
-    IE_NAME = u'photobucket'
-
-    def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-
-        video_extension = mobj.group('ext')
-
-        # Retrieve video webpage to extract further information
-        webpage = self._download_webpage(url, video_id)
-
-        # Extract URL, uploader, and title from webpage
-        self.report_extraction(video_id)
-        # We first try looking at the JavaScript code:
-        mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
-        if mobj is not None:
-            info = json.loads(mobj.group('json'))
-            return [{
-                'id':       video_id,
-                'url':      info[u'downloadUrl'],
-                'uploader': info[u'username'],
-                'upload_date':  datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
-                'title':    info[u'title'],
-                'ext':      video_extension,
-                'thumbnail': info[u'thumbUrl'],
-            }]
-
-        # We try looking in other parts of the webpage
-        video_url = self._search_regex(r'<link rel="video_src" href=".*\?file=([^"]+)" />',
-            webpage, u'video URL')
-
-        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract title')
-        video_title = mobj.group(1).decode('utf-8')
-        video_uploader = mobj.group(2).decode('utf-8')
-
-        return [{
-            'id':       video_id.decode('utf-8'),
-            'url':      video_url.decode('utf-8'),
-            'uploader': video_uploader,
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension.decode('utf-8'),
-        }]
-
-
-class YahooIE(InfoExtractor):
-    """Information extractor for screen.yahoo.com."""
-    _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-        m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
-
-        if m_id is None: 
-            # TODO: Check which url parameters are required
-            info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
-            info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
-                        <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
-                        <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
-                        <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
-                        '''
-            self.report_extraction(video_id)
-            m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
-            if m_info is None:
-                raise ExtractorError(u'Unable to extract video info')
-            video_title = m_info.group('title')
-            video_description = m_info.group('description')
-            video_thumb = m_info.group('thumb')
-            video_date = m_info.group('date')
-            video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
-    
-            # TODO: Find a way to get mp4 videos
-            rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
-            webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
-            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
-            if m_rest is None:
-                raise ExtractorError(u'Unable to extract video url')
-            video_url = m_rest.group('url')
-            video_path = m_rest.group('path')
-
-        else: # We have to use a different method if another id is defined
-            long_id = m_id.group('new_id')
-            info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
-            webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
-            json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
-            info = json.loads(json_str)
-            res = info[u'query'][u'results'][u'mediaObj'][0]
-            stream = res[u'streams'][0]
-            video_path = stream[u'path']
-            video_url = stream[u'host']
-            meta = res[u'meta']
-            video_title = meta[u'title']
-            video_description = meta[u'description']
-            video_thumb = meta[u'thumbnail']
-            video_date = None # I can't find it
-
-        info_dict = {
-                     'id': video_id,
-                     'url': video_url,
-                     'play_path': video_path,
-                     'title':video_title,
-                     'description': video_description,
-                     'thumbnail': video_thumb,
-                     'upload_date': video_date,
-                     'ext': 'flv',
-                     }
-        return info_dict
-
-class VimeoIE(InfoExtractor):
-    """Information extractor for vimeo.com."""
-
-    # _VALID_URL matches Vimeo URLs
-    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
-    IE_NAME = u'vimeo'
-
-    def _real_extract(self, url, new_video=True):
-        # Extract ID from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-        if not mobj.group('proto'):
-            url = 'https://' + url
-        if mobj.group('direct_link') or mobj.group('pro'):
-            url = 'https://vimeo.com/' + video_id
-
-        # Retrieve video webpage to extract further information
-        request = compat_urllib_request.Request(url, None, std_headers)
-        webpage = self._download_webpage(request, video_id)
-
-        # Now we begin extracting as much information as we can from what we
-        # retrieved. First we extract the information common to all extractors,
-        # and later we extract those that are Vimeo specific.
-        self.report_extraction(video_id)
-
-        # Extract the config JSON
-        try:
-            config = webpage.split(' = {config:')[1].split(',assets:')[0]
-            config = json.loads(config)
-        except:
-            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
-                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
-            else:
-                raise ExtractorError(u'Unable to extract info section')
-
-        # Extract title
-        video_title = config["video"]["title"]
-
-        # Extract uploader and uploader_id
-        video_uploader = config["video"]["owner"]["name"]
-        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
-
-        # Extract video thumbnail
-        video_thumbnail = config["video"]["thumbnail"]
-
-        # Extract video description
-        video_description = get_element_by_attribute("itemprop", "description", webpage)
-        if video_description:
-            video_description = clean_html(video_description)
-        else:
-            video_description = u''
-
-        # Extract upload date
-        video_upload_date = None
-        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
-        if mobj is not None:
-            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
-        # Vimeo specific: extract request signature and timestamp
-        sig = config['request']['signature']
-        timestamp = config['request']['timestamp']
-
-        # Vimeo specific: extract video codec and quality information
-        # First consider quality, then codecs, then take everything
-        # TODO bind to format param
-        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
-        files = { 'hd': [], 'sd': [], 'other': []}
-        for codec_name, codec_extension in codecs:
-            if codec_name in config["video"]["files"]:
-                if 'hd' in config["video"]["files"][codec_name]:
-                    files['hd'].append((codec_name, codec_extension, 'hd'))
-                elif 'sd' in config["video"]["files"][codec_name]:
-                    files['sd'].append((codec_name, codec_extension, 'sd'))
-                else:
-                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
-
-        for quality in ('hd', 'sd', 'other'):
-            if len(files[quality]) > 0:
-                video_quality = files[quality][0][2]
-                video_codec = files[quality][0][0]
-                video_extension = files[quality][0][1]
-                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
-                break
-        else:
-            raise ExtractorError(u'No known codec found')
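-        # Illustrative note: with the preference order above, a hypothetical
-        # files == {'hd': [], 'sd': [('h264', 'mp4', 'sd')], 'other': []}
-        # selects codec 'h264', extension 'mp4' and quality 'sd'.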
-
-        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                    %(video_id, sig, timestamp, video_quality, video_codec.upper())
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': video_uploader,
-            'uploader_id': video_uploader_id,
-            'upload_date':  video_upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-            'thumbnail':    video_thumbnail,
-            'description':  video_description,
-        }]
-
-
-class ArteTvIE(InfoExtractor):
-    """arte.tv information extractor."""
-
-    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
-    _LIVE_URL = r'index-[0-9]+\.html$'
-
-    IE_NAME = u'arte.tv'
-
-    def fetch_webpage(self, url):
-        request = compat_urllib_request.Request(url)
-        try:
-            self.report_download_webpage(url)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-        except ValueError as err:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        return webpage
-
-    def grep_webpage(self, url, regex, regexFlags, matchTuples):
-        page = self.fetch_webpage(url)
-        mobj = re.search(regex, page, regexFlags)
-        info = {}
-
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        for (i, key, err) in matchTuples:
-            if mobj.group(i) is None:
-                raise ExtractorError(err)
-            else:
-                info[key] = mobj.group(i)
-
-        return info
-
-    def extractLiveStream(self, url):
-        video_lang = url.split('/')[-4]
-        info = self.grep_webpage(
-            url,
-            r'src="(.*?/videothek_js.*?\.js)',
-            0,
-            [
-                (1, 'url', u'Invalid URL: %s' % url)
-            ]
-        )
-        http_host = url.split('/')[2]
-        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
-        info = self.grep_webpage(
-            next_url,
-            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
-                '(http://.*?\.swf).*?' +
-                '(rtmp://.*?)\'',
-            re.DOTALL,
-            [
-                (1, 'path',   u'could not extract video path: %s' % url),
-                (2, 'player', u'could not extract video player: %s' % url),
-                (3, 'url',    u'could not extract video url: %s' % url)
-            ]
-        )
-        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
-
-    def extractPlus7Stream(self, url):
-        video_lang = url.split('/')[-3]
-        info = self.grep_webpage(
-            url,
-            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
-            0,
-            [
-                (1, 'url', u'Invalid URL: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-        info = self.grep_webpage(
-            next_url,
-            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
-            0,
-            [
-                (1, 'url', u'Could not find <video> tag: %s' % url)
-            ]
-        )
-        next_url = compat_urllib_parse.unquote(info.get('url'))
-
-        info = self.grep_webpage(
-            next_url,
-            r'<video id="(.*?)".*?>.*?' +
-                '<name>(.*?)</name>.*?' +
-                '<dateVideo>(.*?)</dateVideo>.*?' +
-                '<url quality="hd">(.*?)</url>',
-            re.DOTALL,
-            [
-                (1, 'id',    u'could not extract video id: %s' % url),
-                (2, 'title', u'could not extract video title: %s' % url),
-                (3, 'date',  u'could not extract video date: %s' % url),
-                (4, 'url',   u'could not extract video url: %s' % url)
-            ]
-        )
-
-        return {
-            'id':           info.get('id'),
-            'url':          compat_urllib_parse.unquote(info.get('url')),
-            'uploader':     u'arte.tv',
-            'upload_date':  unified_strdate(info.get('date')),
-            'title':        info.get('title').decode('utf-8'),
-            'ext':          u'mp4',
-            'format':       u'NA',
-            'player_url':   None,
-        }
-
-    def _real_extract(self, url):
-        video_id = url.split('/')[-1]
-        self.report_extraction(video_id)
-
-        if re.search(self._LIVE_URL, video_id) is not None:
-            self.extractLiveStream(url)
-            return
-        else:
-            info = self.extractPlus7Stream(url)
-
-        return [info]
-
-
-class GenericIE(InfoExtractor):
-    """Generic last-resort information extractor."""
-
-    _VALID_URL = r'.*'
-    IE_NAME = u'generic'
-
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        if not self._downloader.params.get('test', False):
-            self._downloader.report_warning(u'Falling back on generic information extractor.')
-        super(GenericIE, self).report_download_webpage(video_id)
-
-    def report_following_redirect(self, new_url):
-        """Report information extraction."""
-        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
-
-    def _test_redirect(self, url):
-        """Check if it is a redirect, like url shorteners, in case return the new url."""
-        class HeadRequest(compat_urllib_request.Request):
-            def get_method(self):
-                return "HEAD"
-
-        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
-            """
-            Subclass the HTTPRedirectHandler to make it use our
-            HeadRequest also on the redirected URL
-            """
-            def redirect_request(self, req, fp, code, msg, headers, newurl):
-                if code in (301, 302, 303, 307):
-                    newurl = newurl.replace(' ', '%20')
-                    newheaders = dict((k,v) for k,v in req.headers.items()
-                                      if k.lower() not in ("content-length", "content-type"))
-                    return HeadRequest(newurl,
-                                       headers=newheaders,
-                                       origin_req_host=req.get_origin_req_host(),
-                                       unverifiable=True)
-                else:
-                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
-
-        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
-            """
-            Fallback to GET if HEAD is not allowed (405 HTTP error)
-            """
-            def http_error_405(self, req, fp, code, msg, headers):
-                fp.read()
-                fp.close()
-
-                newheaders = dict((k,v) for k,v in req.headers.items()
-                                  if k.lower() not in ("content-length", "content-type"))
-                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
-                                                 headers=newheaders,
-                                                 origin_req_host=req.get_origin_req_host(),
-                                                 unverifiable=True))
-
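-        # The handler chain below sends a HEAD request, keeps using HEAD
-        # across redirects, and falls back to a plain GET whenever the
-        # server rejects HEAD with a 405, so only headers are fetched
-        # while resolving shortened URLs.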
-        # Build our opener
-        opener = compat_urllib_request.OpenerDirector()
-        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
-                        HTTPMethodFallback, HEADRedirectHandler,
-                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
-            opener.add_handler(handler())
-
-        response = opener.open(HeadRequest(url))
-        if response is None:
-            raise ExtractorError(u'Invalid URL protocol')
-        new_url = response.geturl()
-
-        if url == new_url:
-            return False
-
-        self.report_following_redirect(new_url)
-        return new_url
-
-    def _real_extract(self, url):
-        new_url = self._test_redirect(url)
-        if new_url: return [self.url_result(new_url)]
-
-        video_id = url.split('/')[-1]
-        try:
-            webpage = self._download_webpage(url, video_id)
-        except ValueError as err:
-            # since this is the last-resort InfoExtractor, if
-            # this error is thrown, it'll be thrown here
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        self.report_extraction(video_id)
-        # Start with something easy: JW Player in SWFObject
-        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
-        if mobj is None:
-            # Broaden the search a little bit
-            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
-        if mobj is None:
-            # Broaden the search a little bit: JWPlayer JS loader
-            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
-        if mobj is None:
-            # Try to find twitter cards info
-            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # It's possible that one of the regexes
-        # matched, but returned an empty group:
-        if mobj.group(1) is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_url = compat_urllib_parse.unquote(mobj.group(1))
-        video_id = os.path.basename(video_url)
-
-        # here's a fun little line of code for you:
-        video_extension = os.path.splitext(video_id)[1][1:]
-        video_id = os.path.splitext(video_id)[0]
-
-        # it's tempting to parse this further, but you would
-        # have to take into account all the variations like
-        #   Video Title - Site Name
-        #   Site Name | Video Title
-        #   Video Title - Tagline | Site Name
-        # and so on and so forth; it's just not practical
-        video_title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, u'video title')
-
-        # video uploader is domain name
-        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
-            url, u'video uploader')
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': video_uploader,
-            'upload_date':  None,
-            'title':    video_title,
-            'ext':      video_extension,
-        }]
-
-
-class YoutubeSearchIE(SearchInfoExtractor):
-    """Information Extractor for YouTube search queries."""
-    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
-    _MAX_RESULTS = 1000
-    IE_NAME = u'youtube:search'
-    _SEARCH_KEY = 'ytsearch'
-
-    def report_download_page(self, query, pagenum):
-        """Report attempt to download search page with given number."""
-        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-
-        video_ids = []
-        pagenum = 0
-        limit = n
-
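-        # The GData search feed is paged (max-results=50 per request), so
-        # keep fetching pages until we have collected n ids or the API
-        # reports fewer total items than requested.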
-        while (50 * pagenum) < limit:
-            self.report_download_page(query, pagenum+1)
-            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
-            request = compat_urllib_request.Request(result_url)
-            try:
-                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
-            api_response = json.loads(data)['data']
-
-            if 'items' not in api_response:
-                raise ExtractorError(u'[youtube] No video results')
-
-            new_ids = list(video['id'] for video in api_response['items'])
-            video_ids += new_ids
-
-            limit = min(n, api_response['totalItems'])
-            pagenum += 1
-
-        if len(video_ids) > n:
-            video_ids = video_ids[:n]
-        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
-        return self.playlist_result(videos, query)
-
-
-class GoogleSearchIE(SearchInfoExtractor):
-    """Information Extractor for Google Video search queries."""
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
-    _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
-    _SEARCH_KEY = 'gvsearch'
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-
-        res = {
-            '_type': 'playlist',
-            'id': query,
-            'entries': []
-        }
-
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
-
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
-                    '_type': 'url',
-                    'url': mobj.group(1)
-                }
-                res['entries'].append(e)
-
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
-                return res
-
-class YahooSearchIE(SearchInfoExtractor):
-    """Information Extractor for Yahoo! Video search queries."""
-
-    _MAX_RESULTS = 1000
-    IE_NAME = u'screen.yahoo:search'
-    _SEARCH_KEY = 'yvsearch'
-
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-
-        res = {
-            '_type': 'playlist',
-            'id': query,
-            'entries': []
-        }
-        for pagenum in itertools.count(0): 
-            result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
-            webpage = self._download_webpage(result_url, query,
-                                             note='Downloading results page '+str(pagenum+1))
-            info = json.loads(webpage)
-            m = info[u'm']
-            results = info[u'results']
-
-            for (i, r) in enumerate(results):
-                if (pagenum * 30) + i >= n:
-                    break
-                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
-                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
-                res['entries'].append(e)
-            if (pagenum * 30 + i >= n) or (m[u'last'] >= (m[u'total'] - 1)):
-                break
-
-        return res
-
-
-class YoutubePlaylistIE(InfoExtractor):
-    """Information Extractor for YouTube playlists."""
-
-    _VALID_URL = r"""(?:
-                        (?:https?://)?
-                        (?:\w+\.)?
-                        youtube\.com/
-                        (?:
-                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
-                           \? (?:.*?&)*? (?:p|a|list)=
-                        |  p/
-                        )
-                        ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
-                        .*
-                     |
-                        ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
-                     )"""
-    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
-    _MAX_RESULTS = 50
-    IE_NAME = u'youtube:playlist'
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def _real_extract(self, url):
-        # Extract playlist id
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # Download playlist videos from API
-        playlist_id = mobj.group(1) or mobj.group(2)
-        page_num = 1
-        videos = []
-
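-        # The playlist feed is paged: start-index is 1-based and each
-        # request returns at most _MAX_RESULTS (50) entries, so keep
-        # fetching until a page comes back short.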
-        while True:
-            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
-            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)
-
-            try:
-                response = json.loads(page)
-            except ValueError as err:
-                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
-
-            if 'feed' not in response:
-                raise ExtractorError(u'Got a malformed response from YouTube API')
-            playlist_title = response['feed']['title']['$t']
-            if 'entry' not in response['feed']:
-                # Number of videos is a multiple of self._MAX_RESULTS
-                break
-
-            videos += [ (entry['yt$position']['$t'], entry['content']['src'])
-                        for entry in response['feed']['entry']
-                        if 'content' in entry ]
-
-            if len(response['feed']['entry']) < self._MAX_RESULTS:
-                break
-            page_num += 1
-
-        videos = [v[1] for v in sorted(videos)]
-
-        url_results = [self.url_result(url, 'Youtube') for url in videos]
-        return [self.playlist_result(url_results, playlist_id, playlist_title)]
-
-
-class YoutubeChannelIE(InfoExtractor):
-    """Information Extractor for YouTube channels."""
-
-    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
-    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
-    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
-    _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
-    IE_NAME = u'youtube:channel'
-
-    def extract_videos_from_page(self, page):
-        ids_in_page = []
-        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
-            if mobj.group(1) not in ids_in_page:
-                ids_in_page.append(mobj.group(1))
-        return ids_in_page
-
-    def _real_extract(self, url):
-        # Extract channel id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # Download channel page
-        channel_id = mobj.group(1)
-        video_ids = []
-        pagenum = 1
-
-        url = self._TEMPLATE_URL % (channel_id, pagenum)
-        page = self._download_webpage(url, channel_id,
-                                      u'Downloading page #%s' % pagenum)
-
-        # Extract video identifiers
-        ids_in_page = self.extract_videos_from_page(page)
-        video_ids.extend(ids_in_page)
-
-        # Download any subsequent channel pages using the json-based channel_ajax query
-        if self._MORE_PAGES_INDICATOR in page:
-            while True:
-                pagenum = pagenum + 1
-
-                url = self._MORE_PAGES_URL % (pagenum, channel_id)
-                page = self._download_webpage(url, channel_id,
-                                              u'Downloading page #%s' % pagenum)
-
-                page = json.loads(page)
-
-                ids_in_page = self.extract_videos_from_page(page['content_html'])
-                video_ids.extend(ids_in_page)
-
-                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
-                    break
-
-        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
-
-        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
-        url_entries = [self.url_result(url, 'Youtube') for url in urls]
-        return [self.playlist_result(url_entries, channel_id)]
-
-
-class YoutubeUserIE(InfoExtractor):
-    """Information Extractor for YouTube users."""
-
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
-    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-    _GDATA_PAGE_SIZE = 50
-    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
-    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
-    IE_NAME = u'youtube:user'
-
-    def _real_extract(self, url):
-        # Extract username
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        username = mobj.group(1)
-
-        # Download video ids using the YouTube Data API. The result size per
-        # query is limited (currently to 50 videos), so we query page by page
-        # until a page comes back short - at that point we have all of them.
-
-        video_ids = []
-        pagenum = 0
-
-        while True:
-            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
-
-            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
-            page = self._download_webpage(gdata_url, username,
-                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
-
-            # Extract video identifiers
-            ids_in_page = []
-
-            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(mobj.group(1))
-
-            video_ids.extend(ids_in_page)
-
-            # A little optimization - if the current page is not "full",
-            # i.e. contains fewer than _GDATA_PAGE_SIZE video ids, it must
-            # be the last one, so there is no need to query further pages.
-
-            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
-                break
-
-            pagenum += 1
-
-        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
-        url_results = [self.url_result(url, 'Youtube') for url in urls]
-        return [self.playlist_result(url_results, playlist_title = username)]
-
-
-class BlipTVUserIE(InfoExtractor):
-    """Information Extractor for blip.tv users."""
-
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
-    _PAGE_SIZE = 12
-    IE_NAME = u'blip.tv:user'
-
-    def _real_extract(self, url):
-        # Extract username
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        username = mobj.group(1)
-
-        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
-
-        page = self._download_webpage(url, username, u'Downloading user page')
-        mobj = re.search(r'data-users-id="([^"]+)"', page)
-        page_base = page_base % mobj.group(1)
-
-
-        # Download video ids using BlipTV Ajax calls. The result size per
-        # query is limited (currently to 12 videos), so we query page by page
-        # until a page comes back short - at that point we have all of them.
-
-        video_ids = []
-        pagenum = 1
-
-        while True:
-            url = page_base + "&page=" + str(pagenum)
-            page = self._download_webpage(url, username,
-                                          u'Downloading video ids from page %d' % pagenum)
-
-            # Extract video identifiers
-            ids_in_page = []
-
-            for mobj in re.finditer(r'href="/([^"]+)"', page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(unescapeHTML(mobj.group(1)))
-
-            video_ids.extend(ids_in_page)
-
-            # A little optimization - if the current page is not "full",
-            # i.e. contains fewer than _PAGE_SIZE video ids, it must be
-            # the last one, so there is no need to query further pages.
-
-            if len(ids_in_page) < self._PAGE_SIZE:
-                break
-
-            pagenum += 1
-
-        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
-        url_entries = [self.url_result(url, 'BlipTV') for url in urls]
-        return [self.playlist_result(url_entries, playlist_title = username)]
-
-
-class DepositFilesIE(InfoExtractor):
-    """Information extractor for depositfiles.com"""
-
-    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
-
-    def _real_extract(self, url):
-        file_id = url.split('/')[-1]
-        # Rebuild the URL with the English locale
-        url = 'http://depositfiles.com/en/files/' + file_id
-
-        # Retrieve file webpage with 'Free download' button pressed
-        free_download_indication = { 'gateway_result' : '1' }
-        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
-        try:
-            self.report_download_webpage(file_id)
-            webpage = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))
-
-        # Search for the real file URL
-        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
-        if (mobj is None) or (mobj.group(1) is None):
-            # Try to figure out the reason for the error.
-            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
-            if (mobj is not None) and (mobj.group(1) is not None):
-                restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
-                raise ExtractorError(u'%s' % restriction_message)
-            else:
-                raise ExtractorError(u'Unable to extract download URL from: %s' % url)
-
-        file_url = mobj.group(1)
-        file_extension = os.path.splitext(file_url)[1][1:]
-
-        # Search for file title
-        file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')
-
-        return [{
-            'id':       file_id.decode('utf-8'),
-            'url':      file_url.decode('utf-8'),
-            'uploader': None,
-            'upload_date':  None,
-            'title':    file_title,
-            'ext':      file_extension.decode('utf-8'),
-        }]
-
-
-class FacebookIE(InfoExtractor):
-    """Information Extractor for Facebook"""
-
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
-    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
-    _NETRC_MACHINE = 'facebook'
-    IE_NAME = u'facebook'
-
-    def report_login(self):
-        """Report attempt to log in."""
-        self.to_screen(u'Logging in')
-
-    def _real_initialize(self):
-        if self._downloader is None:
-            return
-
-        useremail = None
-        password = None
-        downloader_params = self._downloader.params
-
-        # Attempt to use provided username and password or .netrc data
-        if downloader_params.get('username', None) is not None:
-            useremail = downloader_params['username']
-            password = downloader_params['password']
-        elif downloader_params.get('usenetrc', False):
-            try:
-                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
-                if info is not None:
-                    useremail = info[0]
-                    password = info[2]
-                else:
-                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-            except (IOError, netrc.NetrcParseError) as err:
-                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
-                return
-
-        if useremail is None:
-            return
-
-        # Log in
-        login_form = {
-            'email': useremail,
-            'pass': password,
-            'login': 'Log+In'
-            }
-        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
-        try:
-            self.report_login()
-            login_results = compat_urllib_request.urlopen(request).read()
-            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
-                self._downloader.report_warning(u'unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
-                return
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
-            return
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('ID')
-
-        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
-        webpage = self._download_webpage(url, video_id)
-
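-        # The video parameters are embedded in the page as a JSON array
-        # sandwiched between these two pieces of the SWF setup code.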
-        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
-        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
-        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
-        if not m:
-            raise ExtractorError(u'Cannot parse data')
-        data = dict(json.loads(m.group(1)))
-        params_raw = compat_urllib_parse.unquote(data['params'])
-        params = json.loads(params_raw)
-        video_data = params['video_data'][0]
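-        # Prefer the HD source when present, otherwise fall back to SD.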
-        video_url = video_data.get('hd_src')
-        if not video_url:
-            video_url = video_data['sd_src']
-        if not video_url:
-            raise ExtractorError(u'Cannot find video URL')
-        video_duration = int(video_data['video_duration'])
-        thumbnail = video_data['thumbnail_src']
-
-        video_title = self._html_search_regex('<h2 class="uiHeaderTitle">([^<]+)</h2>',
-            webpage, u'title')
-
-        info = {
-            'id': video_id,
-            'title': video_title,
-            'url': video_url,
-            'ext': 'mp4',
-            'duration': video_duration,
-            'thumbnail': thumbnail,
-        }
-        return [info]
-
-
-class BlipTVIE(InfoExtractor):
-    """Information extractor for blip.tv"""
-
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
-    _URL_EXT = r'^.*\.([a-z0-9]+)$'
-    IE_NAME = u'blip.tv'
-
-    def report_direct_download(self, title):
-        """Report information extraction."""
-        self.to_screen(u'%s: Direct download detected' % title)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # See https://github.com/rg3/youtube-dl/issues/857
-        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
-        if api_mobj is not None:
-            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
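-        # /play/ URLs redirect to a player page whose URL fragment carries
-        # the real file reference; pull the file id out of the fragment and
-        # restart extraction from the canonical http://blip.tv/a/a-<id> URL.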
-        urlp = compat_urllib_parse_urlparse(url)
-        if urlp.path.startswith('/play/'):
-            request = compat_urllib_request.Request(url)
-            response = compat_urllib_request.urlopen(request)
-            redirecturl = response.geturl()
-            rurlp = compat_urllib_parse_urlparse(redirecturl)
-            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
-            url = 'http://blip.tv/a/a-' + file_id
-            return self._real_extract(url)
-
-
-        if '?' in url:
-            cchar = '&'
-        else:
-            cchar = '?'
-        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = compat_urllib_request.Request(json_url)
-        request.add_header('User-Agent', 'iTunes/10.6.1')
-        self.report_extraction(mobj.group(1))
-        info = None
-        try:
-            urlh = compat_urllib_request.urlopen(request)
-            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
-                basename = url.split('/')[-1]
-                title,ext = os.path.splitext(basename)
-                title = title.decode('UTF-8')
-                ext = ext.replace('.', '')
-                self.report_direct_download(title)
-                info = {
-                    'id': title,
-                    'url': url,
-                    'uploader': None,
-                    'upload_date': None,
-                    'title': title,
-                    'ext': ext,
-                    'urlhandle': urlh
-                }
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
-        if info is None: # Regular URL
-            try:
-                json_code_bytes = urlh.read()
-                json_code = json_code_bytes.decode('utf-8')
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
-
-            try:
-                json_data = json.loads(json_code)
-                if 'Post' in json_data:
-                    data = json_data['Post']
-                else:
-                    data = json_data
-
-                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-                video_url = data['media']['url']
-                umobj = re.match(self._URL_EXT, video_url)
-                if umobj is None:
-                    raise ValueError('Can not determine filename extension')
-                ext = umobj.group(1)
-
-                info = {
-                    'id': data['item_id'],
-                    'url': video_url,
-                    'uploader': data['display_name'],
-                    'upload_date': upload_date,
-                    'title': data['title'],
-                    'ext': ext,
-                    'format': data['media']['mimeType'],
-                    'thumbnail': data['thumbnailUrl'],
-                    'description': data['description'],
-                    'player_url': data['embedUrl'],
-                    'user_agent': 'iTunes/10.6.1',
-                }
-            except (ValueError,KeyError) as err:
-                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
-
-        return [info]
-
-
-class MyVideoIE(InfoExtractor):
-    """Information Extractor for myvideo.de."""
-
-    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
-    IE_NAME = u'myvideo'
-
-    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
-    # Released into the Public Domain by Tristan Fischer on 2013-05-19
-    # https://github.com/rg3/youtube-dl/pull/842
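-    # __rc4crypt is a plain RC4 implementation: the first loop is the
-    # key-scheduling algorithm, the second generates the keystream that
-    # is XORed with the input data.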
-    def __rc4crypt(self,data, key):
-        x = 0
-        box = list(range(256))
-        for i in list(range(256)):
-            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
-            box[i], box[x] = box[x], box[i]
-        x = 0
-        y = 0
-        out = ''
-        for char in data:
-            x = (x + 1) % 256
-            y = (y + box[x]) % 256
-            box[x], box[y] = box[y], box[x]
-            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
-        return out
-
-    def __md5(self,s):
-        return hashlib.md5(s).hexdigest().encode()
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'invalid URL: %s' % url)
-
-        video_id = mobj.group(1)
-
-        GK = (
-          b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
-          b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
-          b'TnpsbA0KTVRkbU1tSTRNdz09'
-        )
-
-        # Get video webpage
-        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
-        webpage = self._download_webpage(webpage_url, video_id)
-
-        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
-        if mobj is not None:
-            self.report_extraction(video_id)
-            video_url = mobj.group(1) + '.flv'
-
-            video_title = self._html_search_regex('<title>([^<]+)</title>',
-                webpage, u'title')
-
-            video_ext = self._search_regex('[.](.+?)$', video_url, u'extension')
-
-            return [{
-                'id':       video_id,
-                'url':      video_url,
-                'uploader': None,
-                'upload_date':  None,
-                'title':    video_title,
-                'ext':      u'flv',
-            }]
-
-        # try encxml
-        mobj = re.search('var flashvars={(.+?)}', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract video')
-
-        params = {}
-        encxml = ''
-        sec = mobj.group(1)
-        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
-            if not a == '_encxml':
-                params[a] = b
-            else:
-                encxml = compat_urllib_parse.unquote(b)
-        if not params.get('domain'):
-            params['domain'] = 'www.myvideo.de'
-        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
-        if 'flash_playertype=MTV' in xmldata_url:
-            self._downloader.report_warning(u'avoiding MTV player')
-            xmldata_url = (
-                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
-                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
-            ) % video_id
-
-        # get enc data
-        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
-        enc_data_b = binascii.unhexlify(enc_data)
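-        # The RC4 key is derived by hashing a static secret (GK, base64
-        # encoded twice) concatenated with the MD5 of the video id.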
-        sk = self.__md5(
-            base64.b64decode(base64.b64decode(GK)) +
-            self.__md5(
-                str(video_id).encode('utf-8')
-            )
-        )
-        dec_data = self.__rc4crypt(enc_data_b, sk)
-
-        # extracting infos
-        self.report_extraction(video_id)
-
-        video_url = None
-        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
-        if mobj:
-            video_url = compat_urllib_parse.unquote(mobj.group(1))
-            if 'myvideo2flash' in video_url:
-                self._downloader.report_warning(u'forcing RTMPT ...')
-                video_url = video_url.replace('rtmpe://', 'rtmpt://')
-
-        if not video_url:
-            # extract non rtmp videos
-            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
-            if mobj is None:
-                raise ExtractorError(u'unable to extract url')
-            video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))
-
-        video_file = self._search_regex('source=\'(.*?)\'', dec_data, u'video file')
-        video_file = compat_urllib_parse.unquote(video_file)
-
-        if not video_file.endswith('f4m'):
-            ppath, prefix = video_file.split('.')
-            video_playpath = '%s:%s' % (prefix, ppath)
-            video_hls_playlist = ''
-        else:
-            video_playpath = ''
-            # Base path of the stream; assumed to live in the path='...'
-            # field of the decrypted player data.
-            video_filepath = compat_urllib_parse.unquote(
-                self._search_regex('path=\'(.*?)\'', dec_data, u'video path'))
-            video_hls_playlist = (
-                video_filepath + video_file
-            ).replace('.f4m', '.m3u8')
-
-        video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, u'swfobj')
-        video_swfobj = compat_urllib_parse.unquote(video_swfobj)
-
-        video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
-            webpage, u'title')
-
-        return [{
-            'id':                 video_id,
-            'url':                video_url,
-            'tc_url':             video_url,
-            'uploader':           None,
-            'upload_date':        None,
-            'title':              video_title,
-            'ext':                u'flv',
-            'play_path':          video_playpath,
-            'video_file':         video_file,
-            'video_hls_playlist': video_hls_playlist,
-            'player_url':         video_swfobj,
-        }]
-
-
-class ComedyCentralIE(InfoExtractor):
-    """Information extractor for The Daily Show and Colbert Report """
-
-    # URLs can be abbreviations like :thedailyshow or :colbert,
-    # URLs for full episodes,
-    # or URLs for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
-    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
-    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
-    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
-                      |(https?://)?(www\.)?
-                          (?P<showname>thedailyshow|colbertnation)\.com/
-                         (full-episodes/(?P<episode>.*)|
-                          (?P<clip>
-                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
-                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
-                     $"""
-
-    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
-
-    _video_extensions = {
-        '3500': 'mp4',
-        '2200': 'mp4',
-        '1700': 'mp4',
-        '1200': 'mp4',
-        '750': 'mp4',
-        '400': 'mp4',
-    }
-    _video_dimensions = {
-        '3500': '1280x720',
-        '2200': '960x540',
-        '1700': '768x432',
-        '1200': '640x360',
-        '750': '512x288',
-        '400': '384x216',
-    }
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def _print_formats(self, formats):
-        print('Available formats:')
-        for x in formats:
-            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
-
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        if mobj.group('shortname'):
-            if mobj.group('shortname') in ('tds', 'thedailyshow'):
-                url = u'http://www.thedailyshow.com/full-episodes/'
-            else:
-                url = u'http://www.colbertnation.com/full-episodes/'
-            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-            assert mobj is not None
-
-        if mobj.group('clip'):
-            if mobj.group('showname') == 'thedailyshow':
-                epTitle = mobj.group('tdstitle')
-            else:
-                epTitle = mobj.group('cntitle')
-            dlNewest = False
-        else:
-            dlNewest = not mobj.group('episode')
-            if dlNewest:
-                epTitle = mobj.group('showname')
-            else:
-                epTitle = mobj.group('episode')
-
-        self.report_extraction(epTitle)
-        webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
-        if dlNewest:
-            url = htmlHandle.geturl()
-            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-            if mobj is None:
-                raise ExtractorError(u'Invalid redirected URL: ' + url)
-            if mobj.group('episode') == '':
-                raise ExtractorError(u'Redirected URL is still not specific: ' + url)
-            epTitle = mobj.group('episode')
-
-        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
-
-        if len(mMovieParams) == 0:
-            # The Colbert Report embeds the information in a data-mgid
-            # attribute without a URL prefix, so extract that alternate
-            # reference and then add the URL prefix manually.
-
-            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
-            if len(altMovieParams) == 0:
-                raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
-            else:
-                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
-
-        uri = mMovieParams[0][1]
-        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
-        indexXml = self._download_webpage(indexUrl, epTitle,
-                                          u'Downloading show index',
-                                          u'unable to download episode index')
-
-        results = []
-
-        idoc = xml.etree.ElementTree.fromstring(indexXml)
-        itemEls = idoc.findall('.//item')
-        for partNum,itemEl in enumerate(itemEls):
-            mediaId = itemEl.findall('./guid')[0].text
-            shortMediaId = mediaId.split(':')[-1]
-            showId = mediaId.split(':')[-2].replace('.com', '')
-            officialTitle = itemEl.findall('./title')[0].text
-            officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
-
-            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
-                        compat_urllib_parse.urlencode({'uri': mediaId}))
-            configXml = self._download_webpage(configUrl, epTitle,
-                                               u'Downloading configuration for %s' % shortMediaId)
-
-            cdoc = xml.etree.ElementTree.fromstring(configXml)
-            turls = []
-            for rendition in cdoc.findall('.//rendition'):
-                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
-                turls.append(finfo)
-
-            if len(turls) == 0:
-                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
-                continue
-
-            if self._downloader.params.get('listformats', None):
-                self._print_formats([i[0] for i in turls])
-                return
-
-            # For now, just pick the highest bitrate
-            format,rtmp_video_url = turls[-1]
-
-            # Get the requested format from the downloader parameters
-            req_format = self._downloader.params.get('format', None)
-
-            # Select format if we can find one
-            for f,v in turls:
-                if f == req_format:
-                    format, rtmp_video_url = f, v
-                    break
-
-            m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
-            if not m:
-                raise ExtractorError(u'Cannot transform RTMP url')
-            base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
-            video_url = base + m.group('finalid')
-
-            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
-            info = {
-                'id': shortMediaId,
-                'url': video_url,
-                'uploader': showId,
-                'upload_date': officialDate,
-                'title': effTitle,
-                'ext': 'mp4',
-                'format': format,
-                'thumbnail': None,
-                'description': officialTitle,
-            }
-            results.append(info)
-
-        return results
-
-
-class EscapistIE(InfoExtractor):
-    """Information extractor for The Escapist """
-
-    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
-    IE_NAME = u'escapist'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        showName = mobj.group('showname')
-        videoId = mobj.group('episode')
-
-        self.report_extraction(videoId)
-        webpage = self._download_webpage(url, videoId)
-
-        videoDesc = self._html_search_regex('<meta name="description" content="([^"]*)"',
-            webpage, u'description', fatal=False)
-
-        imgUrl = self._html_search_regex('<meta property="og:image" content="([^"]*)"',
-            webpage, u'thumbnail', fatal=False)
-
-        playerUrl = self._html_search_regex('<meta property="og:video" content="([^"]*)"',
-            webpage, u'player url')
-
-        title = self._html_search_regex('<meta name="title" content="([^"]*)"',
-            webpage, u'player url').split(' : ')[-1]
-
-        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config url')
-        configUrl = compat_urllib_parse.unquote(configUrl)
-
-        configJSON = self._download_webpage(configUrl, videoId,
-                                            u'Downloading configuration',
-                                            u'unable to download configuration')
-
-        # Technically, it's JavaScript, not JSON
-        configJSON = configJSON.replace("'", '"')
-
-        try:
-            config = json.loads(configJSON)
-        except (ValueError,) as err:
-            raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
-
-        playlist = config['playlist']
-        videoUrl = playlist[1]['url']
-
-        info = {
-            'id': videoId,
-            'url': videoUrl,
-            'uploader': showName,
-            'upload_date': None,
-            'title': title,
-            'ext': 'mp4',
-            'thumbnail': imgUrl,
-            'description': videoDesc,
-            'player_url': playerUrl,
-        }
-
-        return [info]
-
-class CollegeHumorIE(InfoExtractor):
-    """Information extractor for collegehumor.com"""
-
-    _WORKING = False
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
-    IE_NAME = u'collegehumor'
-
-    def report_manifest(self, video_id):
-        """Report information extraction."""
-        self.to_screen(u'%s: Downloading XML manifest' % video_id)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('videoid')
-
-        info = {
-            'id': video_id,
-            'uploader': None,
-            'upload_date': None,
-        }
-
-        self.report_extraction(video_id)
-        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
-        try:
-            metaXml = compat_urllib_request.urlopen(xmlUrl).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
-
-        mdoc = xml.etree.ElementTree.fromstring(metaXml)
-        try:
-            videoNode = mdoc.findall('./video')[0]
-            info['description'] = videoNode.findall('./description')[0].text
-            info['title'] = videoNode.findall('./caption')[0].text
-            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
-            manifest_url = videoNode.findall('./file')[0].text
-        except IndexError:
-            raise ExtractorError(u'Invalid metadata XML file')
-
-        manifest_url += '?hdcore=2.10.3'
-        self.report_manifest(video_id)
-        try:
-            manifestXml = compat_urllib_request.urlopen(manifest_url).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
-
-        adoc = xml.etree.ElementTree.fromstring(manifestXml)
-        try:
-            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0]
-            node_id = media_node.attrib['url']
-            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
-        except IndexError as err:
-            raise ExtractorError(u'Invalid manifest file')
-
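-        # Build the URL of the first fragment of the Adobe HDS (f4m) stream
-        # from the manifest host, the stream id and the media node's url
-        # attribute (Seg1-Frag1).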
-        url_pr = compat_urllib_parse_urlparse(manifest_url)
-        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
-
-        info['url'] = url
-        info['ext'] = 'f4f'
-        return [info]
-
-
-class XVideosIE(InfoExtractor):
-    """Information extractor for xvideos.com"""
-
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
-    IE_NAME = u'xvideos'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(1)
-
-        webpage = self._download_webpage(url, video_id)
-
-        self.report_extraction(video_id)
-
-        # Extract video URL
-        video_url = compat_urllib_parse.unquote(self._search_regex(r'flv_url=(.+?)&',
-            webpage, u'video URL'))
-
-        # Extract title
-        video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XVID',
-            webpage, u'title')
-
-        # Extract video thumbnail
-        video_thumbnail = self._search_regex(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)',
-            webpage, u'thumbnail', fatal=False)
-
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'uploader': None,
-            'upload_date': None,
-            'title': video_title,
-            'ext': 'flv',
-            'thumbnail': video_thumbnail,
-            'description': None,
-        }
-
-        return [info]
-
-
-class SoundcloudIE(InfoExtractor):
-    """Information extractor for soundcloud.com
-       To access the media, the uid of the song and a stream token
-       must be extracted from the page source and the script must make
-       a request to media.soundcloud.com/crossdomain.xml. Then
-       the media can be grabbed by requesting from an url composed
-       of the stream token and uid
-     """
-
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
-    IE_NAME = u'soundcloud'
-
-    def report_resolve(self, video_id):
-        """Report information extraction."""
-        self.to_screen(u'%s: Resolving id' % video_id)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # extract uploader (which is in the url)
-        uploader = mobj.group(1)
-        # extract simple title (uploader + slug of song title)
-        slug_title =  mobj.group(2)
-        simple_title = uploader + u'-' + slug_title
-        full_title = '%s/%s' % (uploader, slug_title)
-
-        self.report_resolve(full_title)
-
-        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
-        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
-        info_json = self._download_webpage(resolv_url, full_title, u'Downloading info JSON')
-
-        info = json.loads(info_json)
-        video_id = info['id']
-        self.report_extraction(full_title)
-
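-        # The /streams endpoint lists the direct media URLs; the 128 kbps
-        # MP3 stream is used here.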
-        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
-        stream_json = self._download_webpage(streams_url, full_title,
-                                             u'Downloading stream definitions',
-                                             u'unable to download stream definitions')
-
-        streams = json.loads(stream_json)
-        mediaURL = streams['http_mp3_128_url']
-        upload_date = unified_strdate(info['created_at'])
-
-        return [{
-            'id':       info['id'],
-            'url':      mediaURL,
-            'uploader': info['user']['username'],
-            'upload_date': upload_date,
-            'title':    info['title'],
-            'ext':      u'mp3',
-            'description': info['description'],
-        }]
-
-class SoundcloudSetIE(InfoExtractor):
-    """Information extractor for soundcloud.com sets
-       To access the media, the uid of the song and a stream token
-       must be extracted from the page source and the script must make
-       a request to media.soundcloud.com/crossdomain.xml. Then
-       the media can be grabbed by requesting from an url composed
-       of the stream token and uid
-     """
-
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
-    IE_NAME = u'soundcloud:set'
-
-    def report_resolve(self, video_id):
-        """Report information extraction."""
-        self.to_screen(u'%s: Resolving id' % video_id)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        # extract uploader (which is in the url)
-        uploader = mobj.group(1)
-        # extract simple title (uploader + slug of song title)
-        slug_title =  mobj.group(2)
-        simple_title = uploader + u'-' + slug_title
-        full_title = '%s/sets/%s' % (uploader, slug_title)
-
-        self.report_resolve(full_title)
-
-        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
-        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
-        info_json = self._download_webpage(resolv_url, full_title)
-
-        videos = []
-        info = json.loads(info_json)
-        if 'errors' in info:
-            for err in info['errors']:
-                self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
-            return
-
-        self.report_extraction(full_title)
-        for track in info['tracks']:
-            video_id = track['id']
-
-            streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
-            stream_json = self._download_webpage(streams_url, video_id, u'Downloading track info JSON')
-
-            self.report_extraction(video_id)
-            streams = json.loads(stream_json)
-            mediaURL = streams['http_mp3_128_url']
-
-            videos.append({
-                'id':       video_id,
-                'url':      mediaURL,
-                'uploader': track['user']['username'],
-                'upload_date':  unified_strdate(track['created_at']),
-                'title':    track['title'],
-                'ext':      u'mp3',
-                'description': track['description'],
-            })
-        return videos
-
-
-class InfoQIE(InfoExtractor):
-    """Information extractor for infoq.com"""
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        webpage = self._download_webpage(url, video_id=url)
-        self.report_extraction(url)
-
-        # Extract video URL
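-        # The player stores a base64-encoded stream path in jsclassref;
-        # decoding it yields the path appended to the RTMP base URL below.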
-        mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract video url')
-        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
-        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
-
-        # Extract title
-        video_title = self._search_regex(r'contentTitle = "(.*?)";',
-            webpage, u'title')
-
-        # Extract description
-        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
-            webpage, u'description', fatal=False)
-
-        video_filename = video_url.split('/')[-1]
-        video_id, extension = video_filename.split('.')
-
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'uploader': None,
-            'upload_date': None,
-            'title': video_title,
-            'ext': extension, # Extension is always(?) mp4, but seems to be flv
-            'thumbnail': None,
-            'description': video_description,
-        }
-
-        return [info]
-
-class MixcloudIE(InfoExtractor):
-    """Information extractor for www.mixcloud.com"""
-
-    _WORKING = False  # Disabled: the site now has a new API, which looks usable: http://www.mixcloud.com/developers/documentation/
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
-    IE_NAME = u'mixcloud'
-
-    def report_download_json(self, file_id):
-        """Report JSON download."""
-        self.to_screen(u'Downloading json')
-
-    def get_urls(self, jsonData, fmt, bitrate='best'):
-        """Get urls from 'audio_formats' section in json"""
-        file_url = None
-        try:
-            bitrate_list = jsonData[fmt]
-            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
-                bitrate = max(bitrate_list) # select highest
-
-            url_list = jsonData[fmt][bitrate]
-        except TypeError: # we have no bitrate info.
-            url_list = jsonData[fmt]
-        return url_list
-
-    def check_urls(self, url_list):
-        """Returns 1st active url from list"""
-        for url in url_list:
-            try:
-                compat_urllib_request.urlopen(url)
-                return url
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                url = None
-
-        return None
-
-    def _print_formats(self, formats):
-        print('Available formats:')
-        for fmt in formats.keys():
-            for b in formats[fmt]:
-                try:
-                    ext = formats[fmt][b][0]
-                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
-                except TypeError: # we have no bitrate info
-                    ext = formats[fmt][0]
-                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
-                    break
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        # extract uploader & filename from url
-        uploader = mobj.group(1)
-        file_id = uploader + "-" + mobj.group(2)
-
-        # construct API request
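-        # The API path reuses the last two components of the page URL (uploader and cloudcast slug).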
-        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
-        # retrieve .json file with links to files
-        request = compat_urllib_request.Request(file_url)
-        try:
-            self.report_download_json(file_url)
-            jsonData = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))
-
-        # parse JSON
-        json_data = json.loads(jsonData)
-        player_url = json_data['player_swf_url']
-        formats = dict(json_data['audio_formats'])
-
-        req_format = self._downloader.params.get('format', None)
-        bitrate = None
-
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(formats)
-            return
-
-        if req_format is None or req_format == 'best':
-            for format_param in formats.keys():
-                url_list = self.get_urls(formats, format_param)
-                # check urls
-                file_url = self.check_urls(url_list)
-                if file_url is not None:
-                    break # got it!
-        else:
-            if req_format not in formats:
-                raise ExtractorError(u'Format is not available')
-
-            url_list = self.get_urls(formats, req_format)
-            file_url = self.check_urls(url_list)
-            format_param = req_format
-
-        return [{
-            'id': file_id,
-            'url': file_url,
-            'uploader': uploader,
-            'upload_date': None,
-            'title': json_data['name'],
-            'ext': file_url.split('.')[-1],
-            'format': format_param if format_param is not None else u'NA',
-            'thumbnail': json_data['thumbnail_url'],
-            'description': json_data['description'],
-            'player_url': player_url,
-        }]
-
-class StanfordOpenClassroomIE(InfoExtractor):
-    """Information extractor for Stanford's Open ClassRoom"""
-
-    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
-    IE_NAME = u'stanfordoc'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        if mobj.group('course') and mobj.group('video'): # A specific video
-            course = mobj.group('course')
-            video = mobj.group('video')
-            info = {
-                'id': course + '_' + video,
-                'uploader': None,
-                'upload_date': None,
-            }
-
-            self.report_extraction(info['id'])
-            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
-            xmlUrl = baseUrl + video + '.xml'
-            try:
-                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
-            mdoc = xml.etree.ElementTree.fromstring(metaXml)
-            try:
-                info['title'] = mdoc.findall('./title')[0].text
-                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
-            except IndexError:
-                raise ExtractorError(u'Invalid metadata XML file')
-            info['ext'] = info['url'].rpartition('.')[2]
-            return [info]
-        elif mobj.group('course'): # A course page
-            course = mobj.group('course')
-            info = {
-                'id': course,
-                'type': 'playlist',
-                'uploader': None,
-                'upload_date': None,
-            }
-
-            coursepage = self._download_webpage(url, info['id'],
-                                        note='Downloading course info page',
-                                        errnote='Unable to download course info page')
-
-            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
-
-            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
-                coursepage, u'description', fatal=False)
-
-            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
-                }
-                    for vpage in links]
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
-        else: # Root page
-            info = {
-                'id': 'Stanford OpenClassroom',
-                'type': 'playlist',
-                'uploader': None,
-                'upload_date': None,
-            }
-
-            self.report_download_webpage(info['id'])
-            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
-            try:
-                rootpage = compat_urllib_request.urlopen(rootURL).read()
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to download course info page: ' + compat_str(err))
-
-            info['title'] = info['id']
-
-            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
-                }
-                    for cpage in links]
-
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
-
-class MTVIE(InfoExtractor):
-    """Information extractor for MTV.com"""
-
-    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
-    IE_NAME = u'mtv'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        if not mobj.group('proto'):
-            url = 'http://' + url
-        video_id = mobj.group('videoid')
-
-        webpage = self._download_webpage(url, video_id)
-
-        song_name = self._html_search_regex(r'<meta name="mtv_vt" content="([^"]+)"/>',
-            webpage, u'song name', fatal=False)
-
-        # The mtv_an meta field carries the performer; the title is built from it and the song name
-        performer = self._html_search_regex(r'<meta name="mtv_an" content="([^"]+)"/>',
-            webpage, u'performer')
-        video_title = u'%s - %s' % (performer, song_name) if song_name else performer
-
-        mtvn_uri = self._html_search_regex(r'<meta name="mtvn_uri" content="([^"]+)"/>',
-            webpage, u'mtvn_uri', fatal=False)
-
-        content_id = self._search_regex(r'MTVN.Player.defaultPlaylistId = ([0-9]+);',
-            webpage, u'content id', fatal=False)
-
-        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
-        self.report_extraction(video_id)
-        request = compat_urllib_request.Request(videogen_url)
-        try:
-            metadataXml = compat_urllib_request.urlopen(request).read()
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err))
-
-        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
-        renditions = mdoc.findall('.//rendition')
-
-        # For now, always pick the highest quality.
-        rendition = renditions[-1]
-
-        try:
-            _,_,ext = rendition.attrib['type'].partition('/')
-            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
-            video_url = rendition.find('./src').text
-        except KeyError:
-            raise ExtractorError('Invalid rendition field.')
-
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'uploader': performer,
-            'upload_date': None,
-            'title': video_title,
-            'ext': ext,
-            'format': format,
-        }
-
-        return [info]
-
-
-class YoukuIE(InfoExtractor):
-    _VALID_URL =  r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
-
-    def _gen_sid(self):
-        nowTime = int(time.time() * 1000)
-        random1 = random.randint(1000, 1998)
-        random2 = random.randint(1000, 9999)
-
-        return "%d%d%d" % (nowTime, random1, random2)
-
-    def _get_file_ID_mix_string(self, seed):
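-        # Shuffle the character set with a simple linear-congruential generator keyed by the
-        # server-provided seed; the shuffled list translates fileid indices back to characters.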
-        mixed = []
-        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
-        seed = float(seed)
-        for i in range(len(source)):
-            seed = (seed * 211 + 30031) % 65536
-            index = math.floor(seed / 65536 * len(source))
-            mixed.append(source[int(index)])
-            source.remove(source[int(index)])
-        #return ''.join(mixed)
-        return mixed
-
-    def _get_file_id(self, fileId, seed):
-        mixed = self._get_file_ID_mix_string(seed)
-        ids = fileId.split('*')
-        realId = []
-        for ch in ids:
-            if ch:
-                realId.append(mixed[int(ch)])
-        return ''.join(realId)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('ID')
-
-        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
-
-        jsondata = self._download_webpage(info_url, video_id)
-
-        self.report_extraction(video_id)
-        try:
-            config = json.loads(jsondata)
-
-            video_title =  config['data'][0]['title']
-            seed = config['data'][0]['seed']
-
-            format = self._downloader.params.get('format', None)
-            supported_format = list(config['data'][0]['streamfileids'].keys())
-
-            if format is None or format == 'best':
-                if 'hd2' in supported_format:
-                    format = 'hd2'
-                else:
-                    format = 'flv'
-                ext = u'flv'
-            elif format == 'worst':
-                format = 'mp4'
-                ext = u'mp4'
-            else:
-                format = 'flv'
-                ext = u'flv'
-
-
-            fileid = config['data'][0]['streamfileids'][format]
-            keys = [s['k'] for s in config['data'][0]['segs'][format]]
-        except (UnicodeDecodeError, ValueError, KeyError):
-            raise ExtractorError(u'Unable to extract info section')
-
-        files_info=[]
-        sid = self._gen_sid()
-        fileid = self._get_file_id(fileid, seed)
-
-        # characters at 0-based indices 8 and 9 of the fileid encode the segment number,
-        # so fileid[8:10] is replaced with the segment index below
-        for index, key in enumerate(keys):
-
-            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
-            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
-
-            info = {
-                'id': '%s_part%02d' % (video_id, index),
-                'url': download_url,
-                'uploader': None,
-                'upload_date': None,
-                'title': video_title,
-                'ext': ext,
-            }
-            files_info.append(info)
-
-        return files_info
-
-
-class XNXXIE(InfoExtractor):
-    """Information extractor for xnxx.com"""
-
-    _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
-    IE_NAME = u'xnxx'
-    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
-    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
-    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(1)
-
-        # Get webpage content
-        webpage = self._download_webpage(url, video_id)
-
-        video_url = self._search_regex(self.VIDEO_URL_RE,
-            webpage, u'video URL')
-        video_url = compat_urllib_parse.unquote(video_url)
-
-        video_title = self._html_search_regex(self.VIDEO_TITLE_RE,
-            webpage, u'title')
-
-        video_thumbnail = self._search_regex(self.VIDEO_THUMB_RE,
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
-            'id': video_id,
-            'url': video_url,
-            'uploader': None,
-            'upload_date': None,
-            'title': video_title,
-            'ext': 'flv',
-            'thumbnail': video_thumbnail,
-            'description': None,
-        }]
-
-
-class GooglePlusIE(InfoExtractor):
-    """Information extractor for plus.google.com."""
-
-    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
-    IE_NAME = u'plus.google'
-
-    def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        post_url = mobj.group(0)
-        video_id = mobj.group(1)
-
-        video_extension = 'flv'
-
-        # Step 1, Retrieve post webpage to extract further information
-        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
-
-        self.report_extraction(video_id)
-
-        # Extract update date
-        upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>',
-            webpage, u'upload date', fatal=False)
-        if upload_date:
-            # Convert timestring to a format suitable for filename
-            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-            upload_date = upload_date.strftime('%Y%m%d')
-
-        # Extract uploader
-        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, u'uploader', fatal=False)
-
-        # Extract title
-        # Get the first line for title
-        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
-            webpage, 'title', default=u'NA')
-
-        # Step 2, Stimulate clicking the image box to launch video
-        video_page = self._search_regex('"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]',
-            webpage, u'video page URL')
-        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
-
-        # Extract video links of all sizes from the video page
-        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-        mobj = re.findall(pattern, webpage)
-        if len(mobj) == 0:
-            raise ExtractorError(u'Unable to extract video links')
-
-        # Sort by the resolution field (ascending)
-        links = sorted(mobj)
-
-        # Take the last entry of the sort, assumed to be the highest resolution
-        video_url = links[-1]
-        # Only keep the url; the resolution part of the tuple is no longer needed
-        video_url = video_url[-1]
-        # Treat escaped \u0026 style hex
-        try:
-            video_url = video_url.decode("unicode_escape")
-        except AttributeError: # Python 3
-            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
-
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'uploader': uploader,
-            'upload_date':  upload_date,
-            'title':    video_title,
-            'ext':      video_extension,
-        }]
-
-class NBAIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
-    IE_NAME = u'nba'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group(1)
-
-        webpage = self._download_webpage(url, video_id)
-
-        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
-
-        shortened_video_id = video_id.rpartition('/')[2]
-        title = self._html_search_regex(r'<meta property="og:title" content="(.*?)"',
-            webpage, 'title', default=shortened_video_id).replace('NBA.com: ', '')
-
-        # It isn't there in the HTML it returns to us
-        # uploader_date = self._html_search_regex(r'<b>Date:</b> (.*?)</div>', webpage, 'upload_date', fatal=False)
-
-        description = self._html_search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)
-
-        info = {
-            'id': shortened_video_id,
-            'url': video_url,
-            'ext': 'mp4',
-            'title': title,
-            # 'uploader_date': uploader_date,
-            'description': description,
-        }
-        return [info]
-
-class JustinTVIE(InfoExtractor):
-    """Information extractor for justin.tv and twitch.tv"""
-    # TODO: One broadcast may be split into multiple videos. The key
-    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
-    # starts at 1 and increases. Can we treat all parts as one video?
-
-    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
-        (?:
-            (?P<channelid>[^/]+)|
-            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
-            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
-        )
-        /?(?:\#.*)?$
-        """
-    _JUSTIN_PAGE_LIMIT = 100
-    IE_NAME = u'justin.tv'
-
-    def report_download_page(self, channel, offset):
-        """Report attempt to download a single page of videos."""
-        self.to_screen(u'%s: Downloading video information from %d to %d' %
-                (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
-
-    # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id):
-        webpage = self._download_webpage(url, video_id,
-                                         u'Downloading video info JSON',
-                                         u'unable to download video info JSON')
-
-        response = json.loads(webpage)
-        if type(response) != list:
-            error_text = response.get('error', 'unknown error')
-            raise ExtractorError(u'Justin.tv API: %s' % error_text)
-        info = []
-        for clip in response:
-            video_url = clip['video_file_url']
-            if video_url:
-                video_extension = os.path.splitext(video_url)[1][1:]
-                video_date = re.sub('-', '', clip['start_time'][:10])
-                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
-                video_id = clip['id']
-                video_title = clip.get('title', video_id)
-                info.append({
-                    'id': video_id,
-                    'url': video_url,
-                    'title': video_title,
-                    'uploader': clip.get('channel_name', video_uploader_id),
-                    'uploader_id': video_uploader_id,
-                    'upload_date': video_date,
-                    'ext': video_extension,
-                })
-        return (len(response), info)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'invalid URL: %s' % url)
-
-        api_base = 'http://api.justin.tv'
-        paged = False
-        if mobj.group('channelid'):
-            paged = True
-            video_id = mobj.group('channelid')
-            api = api_base + '/channel/archives/%s.json' % video_id
-        elif mobj.group('chapterid'):
-            chapter_id = mobj.group('chapterid')
-
-            webpage = self._download_webpage(url, chapter_id)
-            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
-            if not m:
-                raise ExtractorError(u'Cannot find archive of a chapter')
-            archive_id = m.group(1)
-
-            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
-            chapter_info_xml = self._download_webpage(api, chapter_id,
-                                             note=u'Downloading chapter information',
-                                             errnote=u'Chapter information download failed')
-            doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
-            for a in doc.findall('.//archive'):
-                if archive_id == a.find('./id').text:
-                    break
-            else:
-                raise ExtractorError(u'Could not find chapter in chapter information')
-
-            video_url = a.find('./video_file_url').text
-            video_ext = video_url.rpartition('.')[2] or u'flv'
-
-            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
-            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
-                                   note='Downloading chapter metadata',
-                                   errnote='Download of chapter metadata failed')
-            chapter_info = json.loads(chapter_info_json)
-
-            bracket_start = int(doc.find('.//bracket_start').text)
-            bracket_end = int(doc.find('.//bracket_end').text)
-
-            # TODO determine start (and probably fix up file)
-            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
-            #video_url += u'?start=' + TODO:start_timestamp
-            # bracket_start is 13290, but we want 51670615
-            self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
-                                            u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
-
-            info = {
-                'id': u'c' + chapter_id,
-                'url': video_url,
-                'ext': video_ext,
-                'title': chapter_info['title'],
-                'thumbnail': chapter_info['preview'],
-                'description': chapter_info['description'],
-                'uploader': chapter_info['channel']['display_name'],
-                'uploader_id': chapter_info['channel']['name'],
-            }
-            return [info]
-        else:
-            video_id = mobj.group('videoid')
-            api = api_base + '/broadcast/by_archive/%s.json' % video_id
-
-        self.report_extraction(video_id)
-
-        info = []
-        offset = 0
-        limit = self._JUSTIN_PAGE_LIMIT
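-        # For channel archives, page through the API in blocks of _JUSTIN_PAGE_LIMIT;
-        # a page shorter than the limit marks the end of the archive.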
-        while True:
-            if paged:
-                self.report_download_page(video_id, offset)
-            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(page_url, video_id)
-            info.extend(page_info)
-            if not paged or page_count != limit:
-                break
-            offset += limit
-        return info
-
-class FunnyOrDieIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
-
-        video_url = self._html_search_regex(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"',
-            webpage, u'video URL', flags=re.DOTALL)
-
-        title = self._html_search_regex((r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>",
-            r'<title>(?P<title>[^<]+?)</title>'), webpage, 'title', flags=re.DOTALL)
-
-        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
-            webpage, u'description', fatal=False, flags=re.DOTALL)
-
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'ext': 'mp4',
-            'title': title,
-            'description': video_description,
-        }
-        return [info]
-
-class SteamIE(InfoExtractor):
-    _VALID_URL = r"""http://store\.steampowered\.com/
-                (agecheck/)?
-                (?P<urltype>video|app)/ #If the page is only for videos or for a game
-                (?P<gameID>\d+)/?
-                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
-                """
-    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
-    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url, re.VERBOSE)
-        gameID = m.group('gameID')
-
-        videourl = self._VIDEO_PAGE_TEMPLATE % gameID
-        webpage = self._download_webpage(videourl, gameID)
-
-        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
-            videourl = self._AGECHECK_TEMPLATE % gameID
-            self.report_age_confirmation()
-            webpage = self._download_webpage(videourl, gameID)
-
-        self.report_extraction(gameID)
-        game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>',
-                                             webpage, 'game title')
-
-        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
-        mweb = re.finditer(urlRE, webpage)
-        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
-        titles = re.finditer(namesRE, webpage)
-        thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
-        thumbs = re.finditer(thumbsRE, webpage)
-        videos = []
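-        # The movie, title and thumbnail patterns are assumed to occur in the same
-        # order on the page, so zip() pairs them up positionally.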
-        for vid, vtitle, thumb in zip(mweb, titles, thumbs):
-            video_id = vid.group('videoID')
-            title = vtitle.group('videoName')
-            video_url = vid.group('videoURL')
-            video_thumb = thumb.group('thumbnail')
-            if not video_url:
-                raise ExtractorError(u'Cannot find video url for %s' % video_id)
-            info = {
-                'id': video_id,
-                'url': video_url,
-                'ext': 'flv',
-                'title': unescapeHTML(title),
-                'thumbnail': video_thumb,
-            }
-            videos.append(info)
-        return [self.playlist_result(videos, gameID, game_title)]
-
-class UstreamIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
-    IE_NAME = u'ustream'
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
-
-        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
-        webpage = self._download_webpage(url, video_id)
-
-        self.report_extraction(video_id)
-
-        video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
-            webpage, u'title')
-
-        uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
-            webpage, u'uploader', fatal=False, flags=re.DOTALL)
-
-        thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
-            webpage, u'thumbnail', fatal=False)
-
-        info = {
-                'id': video_id,
-                'url': video_url,
-                'ext': 'flv',
-                'title': video_title,
-                'uploader': uploader,
-                'thumbnail': thumbnail,
-               }
-        return info
-
-class WorldStarHipHopIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
-    IE_NAME = u'WorldStarHipHop'
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-
-        webpage_src = self._download_webpage(url, video_id)
-
-        video_url = self._search_regex(r'so\.addVariable\("file","(.*?)"\)',
-            webpage_src, u'video URL')
-
-        if 'mp4' in video_url:
-            ext = 'mp4'
-        else:
-            ext = 'flv'
-
-        video_title = self._html_search_regex(r"<title>(.*)</title>",
-            webpage_src, u'title')
-
-        # Get the thumbnail; if there is none, extract the correct title for WSHH candy videos instead.
-        thumbnail = self._html_search_regex(r'rel="image_src" href="(.*)" />',
-            webpage_src, u'thumbnail', fatal=False)
-
-        if not thumbnail:
-            _title = r"""candytitles.*>(.*)</span>"""
-            mobj = re.search(_title, webpage_src)
-            if mobj is not None:
-                video_title = mobj.group(1)
-
-        results = [{
-            'id': video_id,
-            'url': video_url,
-            'title': video_title,
-            'thumbnail': thumbnail,
-            'ext': ext,
-        }]
-        return results
-
-class RBMARadioIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
-
-        webpage = self._download_webpage(url, video_id)
-
-        json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
-            webpage, u'json data', flags=re.MULTILINE)
-
-        try:
-            data = json.loads(json_data)
-        except ValueError as e:
-            raise ExtractorError(u'Invalid JSON: ' + str(e))
-
-        video_url = data['akamai_url'] + '&cbr=256'
-        url_parts = compat_urllib_parse_urlparse(video_url)
-        video_ext = url_parts.path.rpartition('.')[2]
-        info = {
-                'id': video_id,
-                'url': video_url,
-                'ext': video_ext,
-                'title': data['title'],
-                'description': data.get('teaser_text'),
-                'location': data.get('country_of_origin'),
-                'uploader': data.get('host', {}).get('name'),
-                'uploader_id': data.get('host', {}).get('slug'),
-                'thumbnail': data.get('image', {}).get('large_url_2x'),
-                'duration': data.get('duration'),
-        }
-        return [info]
-
-
-class YouPornIE(InfoExtractor):
-    """Information extractor for youporn.com."""
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-
-    def _print_formats(self, formats):
-        """Print all available formats"""
-        print(u'Available formats:')
-        print(u'ext\t\tformat')
-        print(u'---------------------------------')
-        for format in formats:
-            print(u'%s\t\t%s'  % (format['ext'], format['format']))
-
-    def _specific(self, req_format, formats):
-        for x in formats:
-            if x["format"] == req_format:
-                return x
-        return None
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('videoid')
-
-        req = compat_urllib_request.Request(url)
-        req.add_header('Cookie', 'age_verified=1')
-        webpage = self._download_webpage(req, video_id)
-
-        # Get JSON parameters
-        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, u'JSON parameters')
-        try:
-            params = json.loads(json_params)
-        except ValueError:
-            raise ExtractorError(u'Invalid JSON')
-
-        self.report_extraction(video_id)
-        try:
-            video_title = params['title']
-            upload_date = unified_strdate(params['release_date_f'])
-            video_description = params['description']
-            video_uploader = params['submitted_by']
-            thumbnail = params['thumbnails'][0]['image']
-        except KeyError:
-            raise ExtractorError(u'Missing JSON parameter: ' + compat_str(sys.exc_info()[1]))
-
-        # Get all of the formats available
-        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
-        download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
-            webpage, u'download list').strip()
-
-        # Get all of the links from the page
-        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
-        links = re.findall(LINK_RE, download_list_html)
-        if len(links) == 0:
-            raise ExtractorError(u'ERROR: no known formats available for video')
-
-        self.to_screen(u'Links found: %d' % len(links))
-
-        formats = []
-        for link in links:
-
-            # A link looks like this:
-            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
-            # A path looks like this:
-            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
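-            # path.split('/')[4] is e.g. '480p_370k_8004515'; its first two
-            # '_'-separated fields give the resolution and bitrate.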
-            video_url = unescapeHTML(link)
-            path = compat_urllib_parse_urlparse(video_url).path
-            extension = os.path.splitext(path)[1][1:]
-            format = path.split('/')[4].split('_')[:2]
-            size = format[0]
-            bitrate = format[1]
-            format = "-".join(format)
-            # title = u'%s-%s-%s' % (video_title, size, bitrate)
-
-            formats.append({
-                'id': video_id,
-                'url': video_url,
-                'uploader': video_uploader,
-                'upload_date': upload_date,
-                'title': video_title,
-                'ext': extension,
-                'format': format,
-                'thumbnail': thumbnail,
-                'description': video_description
-            })
-
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(formats)
-            return
-
-        req_format = self._downloader.params.get('format', None)
-        self.to_screen(u'Format: %s' % req_format)
-
-        if req_format is None or req_format == 'best':
-            return [formats[0]]
-        elif req_format == 'worst':
-            return [formats[-1]]
-        elif req_format in ('-1', 'all'):
-            return formats
-        else:
-            format = self._specific(req_format, formats)
-            if format is None:
-                raise ExtractorError(u'Requested format not available')
-            return [format]
-
-
-
-class PornotubeIE(InfoExtractor):
-    """Information extractor for pornotube.com."""
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('videoid')
-        video_title = mobj.group('title')
-
-        # Get webpage content
-        webpage = self._download_webpage(url, video_id)
-
-        # Get the video URL
-        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
-        video_url = compat_urllib_parse.unquote(video_url)
-
-        # Get the upload date
-        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
-        if upload_date: upload_date = unified_strdate(upload_date)
-
-        info = {'id': video_id,
-                'url': video_url,
-                'uploader': None,
-                'upload_date': upload_date,
-                'title': video_title,
-                'ext': 'flv',
-                'format': 'flv'}
-
-        return [info]
-
-class YouJizzIE(InfoExtractor):
-    """Information extractor for youjizz.com."""
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('videoid')
-
-        # Get webpage content
-        webpage = self._download_webpage(url, video_id)
-
-        # Get the video title
-        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
-            webpage, u'title').strip()
-
-        # Get the embed page
-        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
-        if result is None:
-            raise ExtractorError(u'ERROR: unable to extract embed page')
-
-        embed_page_url = result.group(0).strip()
-        video_id = result.group('videoid')
-
-        webpage = self._download_webpage(embed_page_url, video_id)
-
-        # Get the video URL
-        video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
-            webpage, u'video URL')
-
-        info = {'id': video_id,
-                'url': video_url,
-                'title': video_title,
-                'ext': 'flv',
-                'format': 'flv',
-                'player_url': embed_page_url}
-
-        return [info]
-
-class EightTracksIE(InfoExtractor):
-    IE_NAME = '8tracks'
-    _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        playlist_id = mobj.group('id')
-
-        webpage = self._download_webpage(url, playlist_id)
-
-        json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
-        data = json.loads(json_like)
-
-        session = str(random.randint(0, 1000000000))
-        mix_id = data['id']
-        track_count = data['tracks_count']
-        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
-        next_url = first_url
-        res = []
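-        # Walk the play API one track at a time; each response carries the current
-        # track plus an "at_last_track" flag that ends the loop.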
-        for i in itertools.count():
-            api_json = self._download_webpage(next_url, playlist_id,
-                note=u'Downloading song information %s/%s' % (str(i+1), track_count),
-                errnote=u'Failed to download song information')
-            api_data = json.loads(api_json)
-            track_data = api_data[u'set']['track']
-            info = {
-                'id': track_data['id'],
-                'url': track_data['track_file_stream_url'],
-                'title': track_data['performer'] + u' - ' + track_data['name'],
-                'raw_title': track_data['name'],
-                'uploader_id': data['user']['login'],
-                'ext': 'm4a',
-            }
-            res.append(info)
-            if api_data['set']['at_last_track']:
-                break
-            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
-        return res
-
-class KeekIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
-    IE_NAME = u'keek'
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
-
-        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
-        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
-        webpage = self._download_webpage(url, video_id)
-
-        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
-            webpage, u'title')
-
-        uploader = self._html_search_regex(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
-            webpage, u'uploader', fatal=False)
-
-        info = {
-                'id': video_id,
-                'url': video_url,
-                'ext': 'mp4',
-                'title': video_title,
-                'thumbnail': thumbnail,
-                'uploader': uploader
-        }
-        return [info]
-
-class TEDIE(InfoExtractor):
-    _VALID_URL=r'''http://www\.ted\.com/
-                   (
-                        ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
-                        |
-                        ((?P<type_talk>talks)) # We have a simple talk
-                   )
-                   (/lang/(.*?))? # The url may contain the language
-                   /(?P<name>\w+) # Here goes the name and then ".html"
-                   '''
-
-    @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url, re.VERBOSE)
-        if m.group('type_talk'):
-            return [self._talk_info(url)]
-        else:
-            playlist_id = m.group('playlist_id')
-            name = m.group('name')
-            self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id, name))
-            return [self._playlist_videos_info(url, name, playlist_id)]
-
-    def _playlist_videos_info(self,url,name,playlist_id=0):
-        '''Returns the videos of the playlist'''
-        video_RE=r'''
-                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
-                     ([.\s]*?)data-playlist_item_id="(\d+)"
-                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
-                     '''
-        video_name_RE=r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>'
-        webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
-        m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
-        m_names=re.finditer(video_name_RE,webpage)
-
-        playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',
-                                                 webpage, 'playlist title')
-
-        playlist_entries = []
-        for m_video, m_name in zip(m_videos, m_names):
-            video_id = m_video.group('video_id')
-            talk_url = 'http://www.ted.com%s' % m_name.group('talk_url')
-            playlist_entries.append(self.url_result(talk_url, 'TED'))
-        return self.playlist_result(playlist_entries, playlist_id=playlist_id, playlist_title=playlist_title)
-
-    def _talk_info(self, url, video_id=0):
-        """Return the video for the talk in the url"""
-        m = re.match(self._VALID_URL, url,re.VERBOSE)
-        video_name = m.group('name')
-        webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
-        self.report_extraction(video_name)
-        # If the url includes the language we get the title translated
-        title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
-                                        webpage, 'title')
-        json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
-                                    webpage, 'json data')
-        info = json.loads(json_data)
-        desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
-                                       webpage, 'description', flags = re.DOTALL)
-        
-        thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
-                                       webpage, 'thumbnail')
-        info = {
-                'id': info['id'],
-                'url': info['htmlStreams'][-1]['file'],
-                'ext': 'mp4',
-                'title': title,
-                'thumbnail': thumbnail,
-                'description': desc,
-                }
-        return info
-
-class MySpassIE(InfoExtractor):
-    _VALID_URL = r'http://www.myspass.de/.*'
-
-    def _real_extract(self, url):
-        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
-
-        # video id is the last path element of the URL
-        # usually there is a trailing slash, so also try the second but last
-        url_path = compat_urllib_parse_urlparse(url).path
-        url_parent_path, video_id = os.path.split(url_path)
-        if not video_id:
-            _, video_id = os.path.split(url_parent_path)
-
-        # get metadata
-        metadata_url = META_DATA_URL_TEMPLATE % video_id
-        metadata_text = self._download_webpage(metadata_url, video_id)
-        metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
-
-        # extract values from metadata
-        url_flv_el = metadata.find('url_flv')
-        if url_flv_el is None:
-            raise ExtractorError(u'Unable to extract download url')
-        video_url = url_flv_el.text
-        extension = os.path.splitext(video_url)[1][1:]
-        title_el = metadata.find('title')
-        if title_el is None:
-            raise ExtractorError(u'Unable to extract title')
-        title = title_el.text
-        format_id_el = metadata.find('format_id')
-        if format_id_el is None:
-            format = extension
-        else:
-            format = format_id_el.text
-        description_el = metadata.find('description')
-        if description_el is not None:
-            description = description_el.text
-        else:
-            description = None
-        imagePreview_el = metadata.find('imagePreview')
-        if imagePreview_el is not None:
-            thumbnail = imagePreview_el.text
-        else:
-            thumbnail = None
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'title': title,
-            'ext': extension,
-            'format': format,
-            'thumbnail': thumbnail,
-            'description': description
-        }
-        return [info]
-
-class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
-
-    def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
-
-        webpage = self._download_webpage(url, video_id)
-
-        video_title = self._html_search_regex(r'<div class="module-title">(.*?)</div>',
-            webpage, u'title')
-
-        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
-        xml_code = self._download_webpage(xml_url, video_id,
-                    note=u'Downloading XML', errnote=u'Failed to download XML')
-
-        idoc = xml.etree.ElementTree.fromstring(xml_code)
-        last_type = idoc[-1]
-        filename = last_type.findall('./filename')[0].text
-        duration = float(last_type.findall('./duration')[0].text)
-
-        video_url = 'http://video2.spiegel.de/flash/' + filename
-        video_ext = filename.rpartition('.')[2]
-        info = {
-            'id': video_id,
-            'url': video_url,
-            'ext': video_ext,
-            'title': video_title,
-            'duration': duration,
-        }
-        return [info]
-
-class LiveLeakIE(InfoExtractor):
-
-    _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
-    IE_NAME = u'liveleak'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('video_id')
-
-        webpage = self._download_webpage(url, video_id)
-
-        video_url = self._search_regex(r'file: "(.*?)",',
-            webpage, u'video URL')
-
-        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
-            webpage, u'title').replace('LiveLeak.com -', '').strip()
-
-        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
-            webpage, u'description', fatal=False)
-
-        video_uploader = self._html_search_regex(r'By:.*?(\w+)</a>',
-            webpage, u'uploader', fatal=False)
-
-        info = {
-            'id':  video_id,
-            'url': video_url,
-            'ext': 'mp4',
-            'title': video_title,
-            'description': video_description,
-            'uploader': video_uploader
-        }
-
-        return [info]
-
-class ARDIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
-    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
-    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
-
-    def _real_extract(self, url):
-        # determine video id from url
-        m = re.match(self._VALID_URL, url)
-
-        numid = re.search(r'documentId=([0-9]+)', url)
-        if numid:
-            video_id = numid.group(1)
-        else:
-            video_id = m.group('video_id')
-
-        # determine title and media streams from webpage
-        html = self._download_webpage(url, video_id)
-        title = re.search(self._TITLE, html).group('title')
-        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
-        if not streams:
-            assert '"fsk"' in html
-            raise ExtractorError(u'This video is only available after 8:00 pm')
-
-        # choose default media type and highest quality for now
-        stream = max([s for s in streams if int(s["media_type"]) == 0],
-                     key=lambda s: int(s["quality"]))
-
-        # there are two possibilities: an RTMP stream or an HTTP download
-        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
-        if stream['rtmp_url']:
-            self.to_screen(u'RTMP download detected')
-            assert stream['video_url'].startswith('mp4:')
-            info["url"] = stream["rtmp_url"]
-            info["play_path"] = stream['video_url']
-        else:
-            assert stream["video_url"].endswith('.mp4')
-            info["url"] = stream["video_url"]
-        return [info]
-
-class ZDFIE(InfoExtractor):
-    _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
-    _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
-    _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
-    _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
-    _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('video_id')
-
-        html = self._download_webpage(url, video_id)
-        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
-        if not streams:
-            raise ExtractorError(u'No media url found.')
-
-        # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
-        # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
-        # choose first/default media type and highest quality for now
-        stream_ = None
-        for s in streams:        # find 300 - dsl1000mbit
-            if s['quality'] == '300' and s['media_type'] == 'wstreaming':
-                stream_ = s
-                break
-        for s in streams:        # find veryhigh - dsl2000mbit
-            if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working
-                stream_ = s
-                break
-        if stream_ is None:
-            raise ExtractorError(u'No stream found.')
-
-        media_link = self._download_webpage(stream_['video_url'], video_id,'Get stream URL')
-
-        self.report_extraction(video_id)
-        mobj = re.search(self._TITLE, html)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract title')
-        title = unescapeHTML(mobj.group('title'))
-
-        mobj = re.search(self._MMS_STREAM, media_link)
-        if mobj is None:
-            mobj = re.search(self._RTSP_STREAM, media_link)
-            if mobj is None:
-                raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
-        mms_url = mobj.group('video_url')
-
-        mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract extension')
-        ext = mobj.group('ext')
-
-        return [{'id': video_id,
-                 'url': mms_url,
-                 'title': title,
-                 'ext': ext
-                 }]
-
-class TumblrIE(InfoExtractor):
-    _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
-
-    def _real_extract(self, url):
-        m_url = re.match(self._VALID_URL, url)
-        video_id = m_url.group('id')
-        blog = m_url.group('blog_name')
-
-        url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
-        webpage = self._download_webpage(url, video_id)
-
-        re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
-        video = re.search(re_video, webpage)
-        if video is None:
-            raise ExtractorError(u'Unable to extract video')
-        video_url = video.group('video_url')
-        ext = video.group('ext')
-
-        video_thumbnail = self._search_regex(r'posters(.*?)\[\\x22(?P<thumb>.*?)\\x22',
-            webpage, u'thumbnail', fatal=False)  # We pick the first poster
-        if video_thumbnail: video_thumbnail = video_thumbnail.replace('\\', '')
-
-        # The only place where you can get a title, it's not complete,
-        # but searching in other places doesn't work for all videos
-        video_title = self._html_search_regex(r'<title>(?P<title>.*?)</title>',
-            webpage, u'title', flags=re.DOTALL)
-
-        return [{'id': video_id,
-                 'url': video_url,
-                 'title': video_title,
-                 'thumbnail': video_thumbnail,
-                 'ext': ext
-                 }]
-
-class BandcampIE(InfoExtractor):
-    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        title = mobj.group('title')
-        webpage = self._download_webpage(url, title)
-        # We get the link to the free download page
-        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
-        if m_download is None:
-            raise ExtractorError(u'No free songs found')
-
-        download_link = m_download.group(1)
-        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', 
-                       webpage, re.MULTILINE|re.DOTALL).group('id')
-
-        download_webpage = self._download_webpage(download_link, id,
-                                                  'Downloading free downloads page')
-        # The track dictionary is embedded in JavaScript code on the download page
-        info = re.search(r'items: (.*?),$',
-                         download_webpage, re.MULTILINE).group(1)
-        info = json.loads(info)[0]
-        # We pick mp3-320 for now, until format selection can be easily implemented.
-        mp3_info = info[u'downloads'][u'mp3-320']
-        # If we try to use this url it says the link has expired
-        initial_url = mp3_info[u'url']
-        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
-        m_url = re.match(re_url, initial_url)
-        #We build the url we will use to get the final track url
-        # This url is build in Bandcamp in the script download_bunde_*.js
-        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
-        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
-        # If we could correctly generate the .rand field the url would be
-        #in the "download_url" key
-        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
-
-        track_info = {'id':id,
-                      'title' : info[u'title'],
-                      'ext' :   'mp3',
-                      'url' :   final_url,
-                      'thumbnail' : info[u'thumb_url'],
-                      'uploader' :  info[u'artist']
-                      }
-
-        return [track_info]
-
-class RedTubeIE(InfoExtractor):
-    """Information Extractor for redtube"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-
-        video_id = mobj.group('id')
-        video_extension = 'mp4'        
-        webpage = self._download_webpage(url, video_id)
-
-        self.report_extraction(video_id)
-
-        video_url = self._html_search_regex(r'<source src="(.+?)" type="video/mp4">',
-            webpage, u'video URL')
-
-        video_title = self._html_search_regex('<h1 class="videoTitle slidePanelMovable">(.+?)</h1>',
-            webpage, u'title')
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      video_extension,
-            'title':    video_title,
-        }]
-        
-class InaIE(InfoExtractor):
-    """Information Extractor for Ina.fr"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        mrss_url='http://player.ina.fr/notices/%s.mrss' % video_id
-        video_extension = 'mp4'
-        webpage = self._download_webpage(mrss_url, video_id)
-
-        self.report_extraction(video_id)
-
-        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
-            webpage, u'video URL')
-
-        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
-            webpage, u'title')
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      video_extension,
-            'title':    video_title,
-        }]
-
-class HowcastIE(InfoExtractor):
-    """Information Extractor for Howcast.com"""
-    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        webpage_url = 'http://www.howcast.com/videos/' + video_id
-        webpage = self._download_webpage(webpage_url, video_id)
-
-        self.report_extraction(video_id)
-
-        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
-            webpage, u'video URL')
-
-        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
-            webpage, u'title')
-
-        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
-            webpage, u'description', fatal=False)
-
-        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      'mp4',
-            'title':    video_title,
-            'description': video_description,
-            'thumbnail': thumbnail,
-        }]
-
-class VineIE(InfoExtractor):
-    """Information Extractor for Vine.co"""
-    _VALID_URL = r'(?:https?://)?(?:www\.)?vine\.co/v/(?P<id>\w+)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        webpage_url = 'https://vine.co/v/' + video_id
-        webpage = self._download_webpage(webpage_url, video_id)
-
-        self.report_extraction(video_id)
-
-        video_url = self._html_search_regex(r'<meta property="twitter:player:stream" content="(.+?)"',
-            webpage, u'video URL')
-
-        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
-            webpage, u'title')
-
-        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)(\?.*?)?"',
-            webpage, u'thumbnail', fatal=False)
-
-        uploader = self._html_search_regex(r'<div class="user">.*?<h2>(.+?)</h2>',
-            webpage, u'uploader', fatal=False, flags=re.DOTALL)
-
-        return [{
-            'id':        video_id,
-            'url':       video_url,
-            'ext':       'mp4',
-            'title':     video_title,
-            'thumbnail': thumbnail,
-            'uploader':  uploader,
-        }]
-
-class FlickrIE(InfoExtractor):
-    """Information Extractor for Flickr videos"""
-    _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        video_uploader_id = mobj.group('uploader_id')
-        webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
-        webpage = self._download_webpage(webpage_url, video_id)
-
-        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')
-
-        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
-        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
-
-        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
-            first_xml, u'node_id')
-
-        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
-        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
-
-        self.report_extraction(video_id)
-
-        mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract video url')
-        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
-
-        video_title = self._html_search_regex(r'<meta property="og:title" content=(?:"([^"]+)"|\'([^\']+)\')',
-            webpage, u'video title')
-
-        video_description = self._html_search_regex(r'<meta property="og:description" content=(?:"([^"]+)"|\'([^\']+)\')',
-            webpage, u'description', fatal=False)
-
-        thumbnail = self._html_search_regex(r'<meta property="og:image" content=(?:"([^"]+)"|\'([^\']+)\')',
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
-            'id':          video_id,
-            'url':         video_url,
-            'ext':         'mp4',
-            'title':       video_title,
-            'description': video_description,
-            'thumbnail':   thumbnail,
-            'uploader_id': video_uploader_id,
-        }]
-
-class TeamcocoIE(InfoExtractor):
-    _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        url_title = mobj.group('url_title')
-        webpage = self._download_webpage(url, url_title)
-
-        video_id = self._html_search_regex(r'<article class="video" data-id="(\d+?)"',
-            webpage, u'video id')
-
-        self.report_extraction(video_id)
-
-        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
-            webpage, u'title')
-
-        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)"',
-            webpage, u'thumbnail', fatal=False)
-
-        video_description = self._html_search_regex(r'<meta property="og:description" content="(.*?)"',
-            webpage, u'description', fatal=False)
-
-        data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
-        data = self._download_webpage(data_url, video_id, 'Downloading data webpage')
-
-        video_url = self._html_search_regex(r'<file type="high".*?>(.*?)</file>',
-            data, u'video URL')
-
-        return [{
-            'id':          video_id,
-            'url':         video_url,
-            'ext':         'mp4',
-            'title':       video_title,
-            'thumbnail':   thumbnail,
-            'description': video_description,
-        }]
-
-class XHamsterIE(InfoExtractor):
-    """Information Extractor for xHamster"""
-    _VALID_URL = r'(?:http://)?(?:www.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html'
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id
-        webpage = self._download_webpage(mrss_url, video_id)
-
-        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
-        if mobj is None:
-            raise ExtractorError(u'Unable to extract media URL')
-        if len(mobj.group('server')) == 0:
-            video_url = compat_urllib_parse.unquote(mobj.group('file'))
-        else:
-            video_url = mobj.group('server')+'/key='+mobj.group('file')
-        video_extension = video_url.split('.')[-1]
-
-        video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
-            webpage, u'title')
-
-        # Can't see the description anywhere in the UI
-        # video_description = self._html_search_regex(r'<span>Description: </span>(?P<description>[^<]+)',
-        #     webpage, u'description', fatal=False)
-        # if video_description: video_description = unescapeHTML(video_description)
-
-        mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage)
-        if mobj:
-            video_upload_date = mobj.group('upload_date_Y')+mobj.group('upload_date_m')+mobj.group('upload_date_d')
-        else:
-            video_upload_date = None
-            self._downloader.report_warning(u'Unable to extract upload date')
-
-        video_uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
-            webpage, u'uploader id', default=u'anonymous')
-
-        video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
-            webpage, u'thumbnail', fatal=False)
-
-        return [{
-            'id':       video_id,
-            'url':      video_url,
-            'ext':      video_extension,
-            'title':    video_title,
-            # 'description': video_description,
-            'upload_date': video_upload_date,
-            'uploader_id': video_uploader_id,
-            'thumbnail': video_thumbnail
-        }]
-
-class HypemIE(InfoExtractor):
-    """Information Extractor for hypem"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        track_id = mobj.group(1)
-
-        data = { 'ax': 1, 'ts': time.time() }
-        data_encoded = compat_urllib_parse.urlencode(data)
-        complete_url = url + "?" + data_encoded
-        request = compat_urllib_request.Request(complete_url)
-        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
-        cookie = urlh.headers.get('Set-Cookie', '')
-
-        self.report_extraction(track_id)
-
-        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
-            response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
-        try:
-            track_list = json.loads(html_tracks)
-            track = track_list[u'tracks'][0]
-        except ValueError:
-            raise ExtractorError(u'Hypemachine contained invalid JSON.')
-
-        key = track[u"key"]
-        track_id = track[u"id"]
-        artist = track[u"artist"]
-        title = track[u"song"]
-
-        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
-        request = compat_urllib_request.Request(serve_url, "" , {'Content-Type': 'application/json'})
-        request.add_header('cookie', cookie)
-        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
-        try:
-            song_data = json.loads(song_data_json)
-        except ValueError:
-            raise ExtractorError(u'Hypemachine contained invalid JSON.')
-        final_url = song_data[u"url"]
-
-        return [{
-            'id':       track_id,
-            'url':      final_url,
-            'ext':      "mp3",
-            'title':    title,
-            'artist':   artist,
-        }]
-
-class Vbox7IE(InfoExtractor):
-    """Information Extractor for Vbox7"""
-    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
-
-    def _real_extract(self,url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group(1)
-
-        redirect_page, urlh = self._download_webpage_handle(url, video_id)
-        new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
-        redirect_url = urlh.geturl() + new_location
-        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
-
-        title = self._html_search_regex(r'<title>(.*)</title>',
-            webpage, u'title').split('/')[0].strip()
-
-        ext = "flv"
-        info_url = "http://vbox7.com/play/magare.do"
-        data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
-        info_request = compat_urllib_request.Request(info_url, data)
-        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
-        if info_response is None:
-            raise ExtractorError(u'Unable to extract the media url')
-        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
-
-        return [{
-            'id':        video_id,
-            'url':       final_url,
-            'ext':       ext,
-            'title':     title,
-            'thumbnail': thumbnail_url,
-        }]
-
-class GametrailersIE(InfoExtractor):
-    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
-        video_id = mobj.group('id')
-        video_type = mobj.group('type')
-        webpage = self._download_webpage(url, video_id)
-        if video_type == 'full-episodes':
-            mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
-        else:
-            mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
-        mgid = self._search_regex(mgid_re, webpage, u'mgid')
-        data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
-
-        info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
-                                           video_id, u'Downloading video info')
-        links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
-                                               video_id, u'Downloading video urls info')
-
-        self.report_extraction(video_id)
-        info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
-                      <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
-                      <image>.*
-                        <url>(?P<thumb>.*?)</url>.*
-                      </image>'''
-
-        m_info = re.search(info_re, info_page, re.VERBOSE|re.DOTALL)
-        if m_info is None:
-            raise ExtractorError(u'Unable to extract video info')
-        video_title = m_info.group('title')
-        video_description = m_info.group('description')
-        video_thumb = m_info.group('thumb')
-
-        m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
-        if m_urls is None or len(m_urls) == 0:
-            raise ExtractError(u'Unable to extrat video url')
-        # They are sorted from worst to best quality
-        video_url = m_urls[-1].group('url')
-
-        return {'url':         video_url,
-                'id':          video_id,
-                'title':       video_title,
-                # Videos are actually flv not mp4
-                'ext':         'flv',
-                'thumbnail':   video_thumb,
-                'description': video_description,
-                }
-
-def gen_extractors():
-    """ Return a list of an instance of every supported extractor.
-    The order does matter; the first extractor matched is the one handling the URL.
-    """
-    return [
-        YoutubePlaylistIE(),
-        YoutubeChannelIE(),
-        YoutubeUserIE(),
-        YoutubeSearchIE(),
-        YoutubeIE(),
-        MetacafeIE(),
-        DailymotionIE(),
-        GoogleSearchIE(),
-        PhotobucketIE(),
-        YahooIE(),
-        YahooSearchIE(),
-        DepositFilesIE(),
-        FacebookIE(),
-        BlipTVIE(),
-        BlipTVUserIE(),
-        VimeoIE(),
-        MyVideoIE(),
-        ComedyCentralIE(),
-        EscapistIE(),
-        CollegeHumorIE(),
-        XVideosIE(),
-        SoundcloudSetIE(),
-        SoundcloudIE(),
-        InfoQIE(),
-        MixcloudIE(),
-        StanfordOpenClassroomIE(),
-        MTVIE(),
-        YoukuIE(),
-        XNXXIE(),
-        YouJizzIE(),
-        PornotubeIE(),
-        YouPornIE(),
-        GooglePlusIE(),
-        ArteTvIE(),
-        NBAIE(),
-        WorldStarHipHopIE(),
-        JustinTVIE(),
-        FunnyOrDieIE(),
-        SteamIE(),
-        UstreamIE(),
-        RBMARadioIE(),
-        EightTracksIE(),
-        KeekIE(),
-        TEDIE(),
-        MySpassIE(),
-        SpiegelIE(),
-        LiveLeakIE(),
-        ARDIE(),
-        ZDFIE(),
-        TumblrIE(),
-        BandcampIE(),
-        RedTubeIE(),
-        InaIE(),
-        HowcastIE(),
-        VineIE(),
-        FlickrIE(),
-        TeamcocoIE(),
-        XHamsterIE(),
-        HypemIE(),
-        Vbox7IE(),
-        GametrailersIE(),
-        GenericIE()
-    ]
-
-def get_info_extractor(ie_name):
-    """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+from .extractor.common import InfoExtractor, SearchInfoExtractor
+from .extractor import gen_extractors, get_info_extractor
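A minimal sketch (illustration only, assuming the package is importable as youtube_dl) of what this compatibility shim preserves: the old monolithic module now merely re-exports the new package's names, so both import paths resolve to the same objects.

    # Both import paths resolve to the same function object after this change;
    # youtube_dl.InfoExtractors is now only a thin re-export layer.
    from youtube_dl.InfoExtractors import gen_extractors as old_gen
    from youtube_dl.extractor import gen_extractors as new_gen
    assert old_gen is new_gen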
diff --git a/youtube_dl/PostProcessor.py b/youtube_dl/PostProcessor.py
index 8868b37..8c5e539 100644
--- a/youtube_dl/PostProcessor.py
+++ b/youtube_dl/PostProcessor.py
@@ -1,8 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from __future__ import absolute_import
-
 import os
 import subprocess
 import sys
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 9279ce7..a8b62a6 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -1,9 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-from __future__ import with_statement
-from __future__ import absolute_import
-
 __authors__  = (
     'Ricardo Garcia Gonzalez',
     'Danny Colligan',
@@ -48,7 +45,7 @@ from .utils import *
 from .update import update_self
 from .version import __version__
 from .FileDownloader import *
-from .InfoExtractors import gen_extractors
+from .extractor import gen_extractors
 from .PostProcessor import *
 
 def parseOpts(overrideArguments=None):
@@ -200,7 +197,7 @@ def parseOpts(overrideArguments=None):
             action='store_true', dest='listsubtitles',
             help='lists all available subtitles for the video (currently youtube only)', default=False)
     video_format.add_option('--sub-format',
-            action='store', dest='subtitlesformat', metavar='LANG',
+            action='store', dest='subtitlesformat', metavar='FORMAT',
             help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
     video_format.add_option('--sub-lang', '--srt-lang',
             action='store', dest='subtitleslang', metavar='LANG',
@@ -423,7 +420,7 @@ def _real_main(argv=None):
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error(u'using .netrc conflicts with giving username/password')
     if opts.password is not None and opts.username is None:
-        parser.error(u'account username missing')
+        print(u'WARNING: account username missing')
     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
         parser.error(u'using output template conflicts with using title, video ID or auto number')
     if opts.usetitle and opts.useid:
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
new file mode 100644
index 0000000..b208f90
--- /dev/null
+++ b/youtube_dl/extractor/__init__.py
@@ -0,0 +1,133 @@
+
+from .ard import ARDIE
+from .arte import ArteTvIE
+from .bandcamp import BandcampIE
+from .bliptv import BlipTVIE, BlipTVUserIE
+from .breakcom import BreakIE
+from .comedycentral import ComedyCentralIE
+from .collegehumor import CollegeHumorIE
+from .dailymotion import DailymotionIE
+from .depositfiles import DepositFilesIE
+from .eighttracks import EightTracksIE
+from .escapist import EscapistIE
+from .facebook import FacebookIE
+from .flickr import FlickrIE
+from .funnyordie import FunnyOrDieIE
+from .gametrailers import GametrailersIE
+from .generic import GenericIE
+from .googleplus import GooglePlusIE
+from .googlesearch import GoogleSearchIE
+from .howcast import HowcastIE
+from .hypem import HypemIE
+from .ina import InaIE
+from .infoq import InfoQIE
+from .justintv import JustinTVIE
+from .keek import KeekIE
+from .liveleak import LiveLeakIE
+from .metacafe import MetacafeIE
+from .mixcloud import MixcloudIE
+from .mtv import MTVIE
+from .myspass import MySpassIE
+from .myvideo import MyVideoIE
+from .nba import NBAIE
+from .statigram import StatigramIE
+from .photobucket import PhotobucketIE
+from .pornotube import PornotubeIE
+from .rbmaradio import RBMARadioIE
+from .redtube import RedTubeIE
+from .soundcloud import SoundcloudIE, SoundcloudSetIE
+from .spiegel import SpiegelIE
+from .stanfordoc import StanfordOpenClassroomIE
+from .steam import SteamIE
+from .teamcoco import TeamcocoIE
+from .ted import TEDIE
+from .tumblr import TumblrIE
+from .ustream import UstreamIE
+from .vbox7 import Vbox7IE
+from .vimeo import VimeoIE
+from .vine import VineIE
+from .worldstarhiphop import WorldStarHipHopIE
+from .xnxx import XNXXIE
+from .xhamster import XHamsterIE
+from .xvideos import XVideosIE
+from .yahoo import YahooIE, YahooSearchIE
+from .youjizz import YouJizzIE
+from .youku import YoukuIE
+from .youporn import YouPornIE
+from .youtube import YoutubeIE, YoutubePlaylistIE, YoutubeSearchIE, YoutubeUserIE, YoutubeChannelIE
+from .zdf import ZDFIE
+
+def gen_extractors():
+    """ Return a list of an instance of every supported extractor.
+    The order does matter; the first extractor matched is the one handling the URL.
+    """
+    return [
+        YoutubePlaylistIE(),
+        YoutubeChannelIE(),
+        YoutubeUserIE(),
+        YoutubeSearchIE(),
+        YoutubeIE(),
+        MetacafeIE(),
+        DailymotionIE(),
+        GoogleSearchIE(),
+        PhotobucketIE(),
+        YahooIE(),
+        YahooSearchIE(),
+        DepositFilesIE(),
+        FacebookIE(),
+        BlipTVIE(),
+        BlipTVUserIE(),
+        VimeoIE(),
+        MyVideoIE(),
+        ComedyCentralIE(),
+        EscapistIE(),
+        CollegeHumorIE(),
+        XVideosIE(),
+        SoundcloudSetIE(),
+        SoundcloudIE(),
+        InfoQIE(),
+        MixcloudIE(),
+        StanfordOpenClassroomIE(),
+        MTVIE(),
+        YoukuIE(),
+        XNXXIE(),
+        YouJizzIE(),
+        PornotubeIE(),
+        YouPornIE(),
+        GooglePlusIE(),
+        ArteTvIE(),
+        NBAIE(),
+        WorldStarHipHopIE(),
+        JustinTVIE(),
+        FunnyOrDieIE(),
+        SteamIE(),
+        UstreamIE(),
+        RBMARadioIE(),
+        EightTracksIE(),
+        KeekIE(),
+        TEDIE(),
+        MySpassIE(),
+        SpiegelIE(),
+        LiveLeakIE(),
+        ARDIE(),
+        ZDFIE(),
+        TumblrIE(),
+        BandcampIE(),
+        RedTubeIE(),
+        InaIE(),
+        HowcastIE(),
+        VineIE(),
+        FlickrIE(),
+        TeamcocoIE(),
+        XHamsterIE(),
+        HypemIE(),
+        Vbox7IE(),
+        GametrailersIE(),
+        StatigramIE(),
+        BreakIE(),
+        GenericIE()
+    ]
+
+def get_info_extractor(ie_name):
+    """Returns the info extractor class with the given ie_name"""
+    return globals()[ie_name+'IE']
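Given the ordering note in the gen_extractors() docstring, a minimal usage sketch (illustration only; the find_extractor helper and the example URL are invented) of resolving a URL to the first matching extractor via the suitable() classmethod, and of looking an extractor class up by name:

    from youtube_dl.extractor import gen_extractors, get_info_extractor

    def find_extractor(url):
        # The first extractor whose suitable() accepts the URL handles it,
        # which is why the list above is ordered most-specific first.
        for ie in gen_extractors():
            if ie.suitable(url):
                return ie
        return None

    ie = find_extractor('https://vine.co/v/bjpPT1xwg6x')  # hypothetical URL
    # Extractor classes can also be looked up by bare name:
    VineIE = get_info_extractor('Vine')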
diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
new file mode 100644
index 0000000..e1ecdf4
--- /dev/null
+++ b/youtube_dl/extractor/ard.py
@@ -0,0 +1,45 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+class ARDIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
+    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
+
+    def _real_extract(self, url):
+        # determine video id from url
+        m = re.match(self._VALID_URL, url)
+
+        numid = re.search(r'documentId=([0-9]+)', url)
+        if numid:
+            video_id = numid.group(1)
+        else:
+            video_id = m.group('video_id')
+
+        # determine title and media streams from webpage
+        html = self._download_webpage(url, video_id)
+        title = re.search(self._TITLE, html).group('title')
+        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+        if not streams:
+            assert '"fsk"' in html
+            raise ExtractorError(u'This video is only available after 8:00 pm')
+
+        # choose default media type and highest quality for now
+        stream = max([s for s in streams if int(s["media_type"]) == 0],
+                     key=lambda s: int(s["quality"]))
+
+        # there's two possibilities: RTMP stream or HTTP download
+        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
+        if stream['rtmp_url']:
+            self.to_screen(u'RTMP download detected')
+            assert stream['video_url'].startswith('mp4:')
+            info["url"] = stream["rtmp_url"]
+            info["play_path"] = stream['video_url']
+        else:
+            assert stream["video_url"].endswith('.mp4')
+            info["url"] = stream["video_url"]
+        return [info]
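To make the stream selection step concrete, a small sketch (illustration only; the stream dictionaries below are invented, shaped like the groupdict() results of the _MEDIA_STREAM regex):

    # Invented sample data in the shape produced by the regex above.
    streams = [
        {'media_type': '0', 'quality': '1', 'rtmp_url': '', 'video_url': 'http://example.org/low.mp4'},
        {'media_type': '0', 'quality': '2', 'rtmp_url': '', 'video_url': 'http://example.org/high.mp4'},
        {'media_type': '1', 'quality': '3', 'rtmp_url': '', 'video_url': 'http://example.org/other.mp4'},
    ]
    # Same selection as in ARDIE: keep the default media type (0) and take
    # the highest quality among those candidates.
    stream = max([s for s in streams if int(s['media_type']) == 0],
                 key=lambda s: int(s['quality']))
    assert stream['video_url'].endswith('high.mp4')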
diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
new file mode 100644
index 0000000..82e3ffe
--- /dev/null
+++ b/youtube_dl/extractor/arte.py
@@ -0,0 +1,136 @@
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+    unified_strdate,
+)
+
+class ArteTvIE(InfoExtractor):
+    """arte.tv information extractor."""
+
+    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+    _LIVE_URL = r'index-[0-9]+\.html$'
+
+    IE_NAME = u'arte.tv'
+
+    def fetch_webpage(self, url):
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(url)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
+        except ValueError as err:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        return webpage
+
+    def grep_webpage(self, url, regex, regexFlags, matchTuples):
+        page = self.fetch_webpage(url)
+        mobj = re.search(regex, page, regexFlags)
+        info = {}
+
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        for (i, key, err) in matchTuples:
+            if mobj.group(i) is None:
+                raise ExtractorError(err)
+            else:
+                info[key] = mobj.group(i)
+
+        return info
+
+    # TODO implement Live Stream
+    # def extractLiveStream(self, url):
+    #     video_lang = url.split('/')[-4]
+    #     info = self.grep_webpage(
+    #         url,
+    #         r'src="(.*?/videothek_js.*?\.js)',
+    #         0,
+    #         [
+    #             (1, 'url', u'Invalid URL: %s' % url)
+    #         ]
+    #     )
+    #     http_host = url.split('/')[2]
+    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
+    #     info = self.grep_webpage(
+    #         next_url,
+    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
+    #             '(http://.*?\.swf).*?' +
+    #             '(rtmp://.*?)\'',
+    #         re.DOTALL,
+    #         [
+    #             (1, 'path',   u'could not extract video path: %s' % url),
+    #             (2, 'player', u'could not extract video player: %s' % url),
+    #             (3, 'url',    u'could not extract video url: %s' % url)
+    #         ]
+    #     )
+    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))
+
+    def extractPlus7Stream(self, url):
+        video_lang = url.split('/')[-3]
+        info = self.grep_webpage(
+            url,
+            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
+            0,
+            [
+                (1, 'url', u'Invalid URL: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+        info = self.grep_webpage(
+            next_url,
+            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
+            0,
+            [
+                (1, 'url', u'Could not find <video> tag: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+
+        info = self.grep_webpage(
+            next_url,
+            r'<video id="(.*?)".*?>.*?' +
+                '<name>(.*?)</name>.*?' +
+                '<dateVideo>(.*?)</dateVideo>.*?' +
+                '<url quality="hd">(.*?)</url>',
+            re.DOTALL,
+            [
+                (1, 'id',    u'could not extract video id: %s' % url),
+                (2, 'title', u'could not extract video title: %s' % url),
+                (3, 'date',  u'could not extract video date: %s' % url),
+                (4, 'url',   u'could not extract video url: %s' % url)
+            ]
+        )
+
+        return {
+            'id':           info.get('id'),
+            'url':          compat_urllib_parse.unquote(info.get('url')),
+            'uploader':     u'arte.tv',
+            'upload_date':  unified_strdate(info.get('date')),
+            'title':        info.get('title').decode('utf-8'),
+            'ext':          u'mp4',
+            'format':       u'NA',
+            'player_url':   None,
+        }
+
+    def _real_extract(self, url):
+        video_id = url.split('/')[-1]
+        self.report_extraction(video_id)
+
+        if re.search(self._LIVE_URL, video_id) is not None:
+            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
+            # self.extractLiveStream(url)
+            # return
+        else:
+            info = self.extractPlus7Stream(url)
+
+        return [info]
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
new file mode 100644
index 0000000..dcf6721
--- /dev/null
+++ b/youtube_dl/extractor/bandcamp.py
@@ -0,0 +1,54 @@
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class BandcampIE(InfoExtractor):
+    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        title = mobj.group('title')
+        webpage = self._download_webpage(url, title)
+        # We get the link to the free download page
+        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
+        if m_download is None:
+            raise ExtractorError(u'No free songs found')
+
+        download_link = m_download.group(1)
+        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', 
+                       webpage, re.MULTILINE|re.DOTALL).group('id')
+
+        download_webpage = self._download_webpage(download_link, id,
+                                                  'Downloading free downloads page')
+        # We get the dictionary of the track from some javascript code
+        info = re.search(r'items: (.*?),$',
+                         download_webpage, re.MULTILINE).group(1)
+        info = json.loads(info)[0]
+        # We pick mp3-320 for now, until format selection can be easily implemented.
+        mp3_info = info[u'downloads'][u'mp3-320']
+        # If we try to use this url it says the link has expired
+        initial_url = mp3_info[u'url']
+        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
+        m_url = re.match(re_url, initial_url)
+        # We build the url we will use to get the final track url
+        # This url is built by Bandcamp in the script download_bunde_*.js
+        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
+        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
+        # If we could correctly generate the .rand field the url would be
+        # in the "download_url" key
+        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
+
+        track_info = {'id': id,
+                      'title': info[u'title'],
+                      'ext': 'mp3',
+                      'url': final_url,
+                      'thumbnail': info[u'thumb_url'],
+                      'uploader': info[u'artist']
+                      }
+
+        return [track_info]
diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py
new file mode 100644
index 0000000..df2ad4b
--- /dev/null
+++ b/youtube_dl/extractor/bliptv.py
@@ -0,0 +1,177 @@
+import datetime
+import json
+import os
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_parse_qs,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+
+    ExtractorError,
+    unescapeHTML,
+)
+
+
+class BlipTVIE(InfoExtractor):
+    """Information extractor for blip.tv"""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
+    _URL_EXT = r'^.*\.([a-z0-9]+)$'
+    IE_NAME = u'blip.tv'
+
+    def report_direct_download(self, title):
+        """Report information extraction."""
+        self.to_screen(u'%s: Direct download detected' % title)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # See https://github.com/rg3/youtube-dl/issues/857
+        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
+        if api_mobj is not None:
+            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
+        urlp = compat_urllib_parse_urlparse(url)
+        if urlp.path.startswith('/play/'):
+            request = compat_urllib_request.Request(url)
+            response = compat_urllib_request.urlopen(request)
+            redirecturl = response.geturl()
+            rurlp = compat_urllib_parse_urlparse(redirecturl)
+            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
+            url = 'http://blip.tv/a/a-' + file_id
+            return self._real_extract(url)
+
+
+        if '?' in url:
+            cchar = '&'
+        else:
+            cchar = '?'
+        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
+        request = compat_urllib_request.Request(json_url)
+        request.add_header('User-Agent', 'iTunes/10.6.1')
+        self.report_extraction(mobj.group(1))
+        info = None
+        try:
+            urlh = compat_urllib_request.urlopen(request)
+            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
+                basename = url.split('/')[-1]
+                title,ext = os.path.splitext(basename)
+                title = title.decode('UTF-8')
+                ext = ext.replace('.', '')
+                self.report_direct_download(title)
+                info = {
+                    'id': title,
+                    'url': url,
+                    'uploader': None,
+                    'upload_date': None,
+                    'title': title,
+                    'ext': ext,
+                    'urlhandle': urlh
+                }
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+        if info is None: # Regular URL
+            try:
+                json_code_bytes = urlh.read()
+                json_code = json_code_bytes.decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
+
+            try:
+                json_data = json.loads(json_code)
+                if 'Post' in json_data:
+                    data = json_data['Post']
+                else:
+                    data = json_data
+
+                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+                video_url = data['media']['url']
+                umobj = re.match(self._URL_EXT, video_url)
+                if umobj is None:
+                    raise ValueError('Can not determine filename extension')
+                ext = umobj.group(1)
+
+                info = {
+                    'id': data['item_id'],
+                    'url': video_url,
+                    'uploader': data['display_name'],
+                    'upload_date': upload_date,
+                    'title': data['title'],
+                    'ext': ext,
+                    'format': data['media']['mimeType'],
+                    'thumbnail': data['thumbnailUrl'],
+                    'description': data['description'],
+                    'player_url': data['embedUrl'],
+                    'user_agent': 'iTunes/10.6.1',
+                }
+            except (ValueError,KeyError) as err:
+                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
+
+        return [info]
+
+
+class BlipTVUserIE(InfoExtractor):
+    """Information Extractor for blip.tv users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _PAGE_SIZE = 12
+    IE_NAME = u'blip.tv:user'
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        username = mobj.group(1)
+
+        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
+
+        page = self._download_webpage(url, username, u'Downloading user page')
+        mobj = re.search(r'data-users-id="([^"]+)"', page)
+        page_base = page_base % mobj.group(1)
+
+
+        # Download video ids using BlipTV Ajax calls. Result size per
+        # query is limited (currently to 12 videos), so we query page
+        # by page until no more video ids are returned, which means we
+        # have got all of them.
+
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            url = page_base + "&page=" + str(pagenum)
+            page = self._download_webpage(url, username,
+                                          u'Downloading video ids from page %d' % pagenum)
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(r'href="/([^"]+)"', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+            video_ids.extend(ids_in_page)
+
+            # A little optimization - if the current page is not
+            # "full", i.e. does not contain _PAGE_SIZE video ids, then
+            # we can assume that this page is the last one - there
+            # are no more ids on further pages and no need to query
+            # again.
+
+            if len(ids_in_page) < self._PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+        url_entries = [self.url_result(url, 'BlipTV') for url in urls]
+        return [self.playlist_result(url_entries, playlist_title = username)]
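The page-by-page loop above is a generic "query until a short page" pattern; a self-contained sketch of the same idea (illustration only; fetch_page, PAGE_SIZE and the 30-item total are stand-ins for the real AJAX call):

    PAGE_SIZE = 12  # mirrors BlipTVUserIE._PAGE_SIZE

    def fetch_page(pagenum):
        # Stand-in for the AJAX call; pretend there are 30 ids in total.
        all_ids = ['video-%d' % i for i in range(30)]
        return all_ids[(pagenum - 1) * PAGE_SIZE:pagenum * PAGE_SIZE]

    video_ids = []
    pagenum = 1
    while True:
        ids_in_page = fetch_page(pagenum)
        video_ids.extend(ids_in_page)
        # A page with fewer than PAGE_SIZE ids must be the last one.
        if len(ids_in_page) < PAGE_SIZE:
            break
        pagenum += 1

    assert len(video_ids) == 30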
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
new file mode 100644
index 0000000..1f6620d
--- /dev/null
+++ b/youtube_dl/extractor/breakcom.py
@@ -0,0 +1,25 @@
+import re
+
+from .common import InfoExtractor
+
+
+class BreakIE(InfoExtractor):
+    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group(1).split("-")[-1]
+        webpage = self._download_webpage(url, video_id)
+        video_url = re.search(r"videoPath: '(.+?)',",webpage).group(1)
+        key = re.search(r"icon: '(.+?)',",webpage).group(1)
+        final_url = str(video_url)+"?"+str(key)
+        thumbnail_url = re.search(r"thumbnailURL: '(.+?)'",webpage).group(1)
+        title = re.search(r"sVidTitle: '(.+)',",webpage).group(1)
+        ext = video_url.split('.')[-1]
+        return [{
+            'id':        video_id,
+            'url':       final_url,
+            'ext':       ext,
+            'title':     title,
+            'thumbnail': thumbnail_url,
+        }]
diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py
new file mode 100644
index 0000000..7ae0972
--- /dev/null
+++ b/youtube_dl/extractor/collegehumor.py
@@ -0,0 +1,74 @@
+import re
+import socket
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class CollegeHumorIE(InfoExtractor):
+    _WORKING = False
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
+
+    def report_manifest(self, video_id):
+        """Report information extraction."""
+        self.to_screen(u'%s: Downloading XML manifest' % video_id)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('videoid')
+
+        info = {
+            'id': video_id,
+            'uploader': None,
+            'upload_date': None,
+        }
+
+        self.report_extraction(video_id)
+        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
+        try:
+            metaXml = compat_urllib_request.urlopen(xmlUrl).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
+
+        mdoc = xml.etree.ElementTree.fromstring(metaXml)
+        try:
+            videoNode = mdoc.findall('./video')[0]
+            info['description'] = videoNode.findall('./description')[0].text
+            info['title'] = videoNode.findall('./caption')[0].text
+            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
+            manifest_url = videoNode.findall('./file')[0].text
+        except IndexError:
+            raise ExtractorError(u'Invalid metadata XML file')
+
+        manifest_url += '?hdcore=2.10.3'
+        self.report_manifest(video_id)
+        try:
+            manifestXml = compat_urllib_request.urlopen(manifest_url).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
+
+        adoc = xml.etree.ElementTree.fromstring(manifestXml)
+        try:
+            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0]
+            node_id = media_node.attrib['url']
+            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
+        except IndexError as err:
+            raise ExtractorError(u'Invalid manifest file')
+
+        url_pr = compat_urllib_parse_urlparse(manifest_url)
+        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'
+
+        info['url'] = url
+        info['ext'] = 'f4f'
+        return [info]
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
new file mode 100644
index 0000000..1bb3590
--- /dev/null
+++ b/youtube_dl/extractor/comedycentral.py
@@ -0,0 +1,179 @@
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+    compat_urllib_parse,
+
+    ExtractorError,
+    unified_strdate,
+)
+
+
+class ComedyCentralIE(InfoExtractor):
+    """Information extractor for The Daily Show and Colbert Report """
+
+    # urls can be abbreviations like :thedailyshow or :colbert
+    # urls for episodes like:
+    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
+    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
+    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
+    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
+                      |(https?://)?(www\.)?
+                          (?P<showname>thedailyshow|colbertnation)\.com/
+                         (full-episodes/(?P<episode>.*)|
+                          (?P<clip>
+                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
+                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
+                     $"""
+
+    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
+
+    _video_extensions = {
+        '3500': 'mp4',
+        '2200': 'mp4',
+        '1700': 'mp4',
+        '1200': 'mp4',
+        '750': 'mp4',
+        '400': 'mp4',
+    }
+    _video_dimensions = {
+        '3500': '1280x720',
+        '2200': '960x540',
+        '1700': '768x432',
+        '1200': '640x360',
+        '750': '512x288',
+        '400': '384x216',
+    }
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for x in formats:
+            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
+
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        if mobj.group('shortname'):
+            if mobj.group('shortname') in ('tds', 'thedailyshow'):
+                url = u'http://www.thedailyshow.com/full-episodes/'
+            else:
+                url = u'http://www.colbertnation.com/full-episodes/'
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+            assert mobj is not None
+
+        if mobj.group('clip'):
+            if mobj.group('showname') == 'thedailyshow':
+                epTitle = mobj.group('tdstitle')
+            else:
+                epTitle = mobj.group('cntitle')
+            dlNewest = False
+        else:
+            dlNewest = not mobj.group('episode')
+            if dlNewest:
+                epTitle = mobj.group('showname')
+            else:
+                epTitle = mobj.group('episode')
+
+        self.report_extraction(epTitle)
+        webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
+        if dlNewest:
+            url = htmlHandle.geturl()
+            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+            if mobj is None:
+                raise ExtractorError(u'Invalid redirected URL: ' + url)
+            if mobj.group('episode') == '':
+                raise ExtractorError(u'Redirected URL is still not specific: ' + url)
+            epTitle = mobj.group('episode')
+
+        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
+
+        if len(mMovieParams) == 0:
+            # The Colbert Report embeds the information in a data-mgid
+            # attribute without a URL prefix; so extract the alternate
+            # reference and then add the URL prefix manually.
+
+            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
+            if len(altMovieParams) == 0:
+                raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
+            else:
+                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+
+        uri = mMovieParams[0][1]
+        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
+        indexXml = self._download_webpage(indexUrl, epTitle,
+                                          u'Downloading show index',
+                                          u'unable to download episode index')
+
+        results = []
+
+        idoc = xml.etree.ElementTree.fromstring(indexXml)
+        itemEls = idoc.findall('.//item')
+        for partNum,itemEl in enumerate(itemEls):
+            mediaId = itemEl.findall('./guid')[0].text
+            shortMediaId = mediaId.split(':')[-1]
+            showId = mediaId.split(':')[-2].replace('.com', '')
+            officialTitle = itemEl.findall('./title')[0].text
+            officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
+
+            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
+                        compat_urllib_parse.urlencode({'uri': mediaId}))
+            configXml = self._download_webpage(configUrl, epTitle,
+                                               u'Downloading configuration for %s' % shortMediaId)
+
+            cdoc = xml.etree.ElementTree.fromstring(configXml)
+            turls = []
+            for rendition in cdoc.findall('.//rendition'):
+                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
+                turls.append(finfo)
+
+            if len(turls) == 0:
+                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
+                continue
+
+            if self._downloader.params.get('listformats', None):
+                self._print_formats([i[0] for i in turls])
+                return
+
+            # For now, just pick the highest bitrate
+            format,rtmp_video_url = turls[-1]
+
+            # Get the format arg from the arg stream
+            req_format = self._downloader.params.get('format', None)
+
+            # Select format if we can find one
+            for f,v in turls:
+                if f == req_format:
+                    format, rtmp_video_url = f, v
+                    break
+
+            m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
+            if not m:
+                raise ExtractorError(u'Cannot transform RTMP url')
+            base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+            video_url = base + m.group('finalid')
+
+            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
+            info = {
+                'id': shortMediaId,
+                'url': video_url,
+                'uploader': showId,
+                'upload_date': officialDate,
+                'title': effTitle,
+                'ext': 'mp4',
+                'format': format,
+                'thumbnail': None,
+                'description': officialTitle,
+            }
+            results.append(info)
+
+        return results
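The bitrate/format selection inside the loop (default to the highest bitrate, override when a requested format is available) in isolation, as a sketch with invented sample values for turls and req_format:

    # (bitrate, rtmp_url) tuples as built from the <rendition> elements,
    # listed here from lowest to highest bitrate.
    turls = [('400', 'rtmp://example/low'), ('1200', 'rtmp://example/mid'),
             ('3500', 'rtmp://example/high')]

    # Default: the last (highest-bitrate) entry.
    format, rtmp_video_url = turls[-1]

    # If the user asked for a specific format and it is available, use it.
    req_format = '1200'  # e.g. taken from the --format option
    for f, v in turls:
        if f == req_format:
            format, rtmp_video_url = f, v
            break

    assert (format, rtmp_video_url) == ('1200', 'rtmp://example/mid')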
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
new file mode 100644
index 0000000..062f4cf
--- /dev/null
+++ b/youtube_dl/extractor/common.py
@@ -0,0 +1,264 @@
+import base64
+import os
+import re
+import socket
+import sys
+
+from ..utils import (
+    compat_http_client,
+    compat_urllib_error,
+    compat_urllib_request,
+    compat_str,
+
+    clean_html,
+    compiled_regex_type,
+    ExtractorError,
+)
+
+class InfoExtractor(object):
+    """Information Extractor class.
+
+    Information extractors are the classes that, given a URL, extract
+    information about the video (or videos) the URL refers to. This
+    information includes the real video URL, the video title, author and
+    others. The information is stored in a dictionary which is then
+    passed to the FileDownloader. The FileDownloader processes this
+    information possibly downloading the video to the file system, among
+    other possible outcomes.
+
+    The dictionaries must include the following fields:
+
+    id:             Video identifier.
+    url:            Final video URL.
+    title:          Video title, unescaped.
+    ext:            Video filename extension.
+
+    The following fields are optional:
+
+    format:         The video format, defaults to ext (used for --get-format)
+    thumbnail:      Full URL to a video thumbnail image.
+    description:    One-line video description.
+    uploader:       Full name of the video uploader.
+    upload_date:    Video upload date (YYYYMMDD).
+    uploader_id:    Nickname or id of the video uploader.
+    location:       Physical location of the video.
+    player_url:     SWF Player URL (used for rtmpdump).
+    subtitles:      The subtitle file contents.
+    urlhandle:      [internal] The urlHandle to be used to download the file,
+                    as returned by urllib.request.urlopen
+
+    The fields should all be Unicode strings.
+
+    Subclasses of this one should re-define the _real_initialize() and
+    _real_extract() methods and define a _VALID_URL regexp.
+    In most cases, they should also be added to the list of extractors.
+
+    _real_extract() must return a *list* of information dictionaries as
+    described above.
+
+    Finally, the _WORKING attribute should be set to False for broken IEs
+    in order to warn the users and skip the tests.
+    """
+
+    _ready = False
+    _downloader = None
+    _WORKING = True
+
+    def __init__(self, downloader=None):
+        """Constructor. Receives an optional downloader."""
+        self._ready = False
+        self.set_downloader(downloader)
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(cls._VALID_URL, url) is not None
+
+    @classmethod
+    def working(cls):
+        """Getter method for _WORKING."""
+        return cls._WORKING
+
+    def initialize(self):
+        """Initializes an instance (authentication, etc)."""
+        if not self._ready:
+            self._real_initialize()
+            self._ready = True
+
+    def extract(self, url):
+        """Extracts URL information and returns it in list of dicts."""
+        self.initialize()
+        return self._real_extract(url)
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this IE."""
+        self._downloader = downloader
+
+    def _real_initialize(self):
+        """Real initialization process. Redefine in subclasses."""
+        pass
+
+    def _real_extract(self, url):
+        """Real extraction process. Redefine in subclasses."""
+        pass
+
+    @property
+    def IE_NAME(self):
+        return type(self).__name__[:-2]
+
+    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        """ Returns the response handle """
+        if note is None:
+            self.report_download_webpage(video_id)
+        elif note is not False:
+            self.to_screen(u'%s: %s' % (video_id, note))
+        try:
+            return compat_urllib_request.urlopen(url_or_request)
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is None:
+                errnote = u'Unable to download webpage'
+            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
+
+    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
+        """ Returns a tuple (page content as string, URL handle) """
+        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+        content_type = urlh.headers.get('Content-Type', '')
+        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+        if m:
+            encoding = m.group(1)
+        else:
+            encoding = 'utf-8'
+        webpage_bytes = urlh.read()
+        if self._downloader.params.get('dump_intermediate_pages', False):
+            try:
+                url = url_or_request.get_full_url()
+            except AttributeError:
+                url = url_or_request
+            self.to_screen(u'Dumping request to ' + url)
+            dump = base64.b64encode(webpage_bytes).decode('ascii')
+            self._downloader.to_screen(dump)
+        content = webpage_bytes.decode(encoding, 'replace')
+        return (content, urlh)
+
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        """ Returns the data of the page as a string """
+        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
+
+    def to_screen(self, msg):
+        """Print msg to screen, prefixing it with '[ie_name]'"""
+        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+
+    def report_extraction(self, id_or_name):
+        """Report information extraction."""
+        self.to_screen(u'%s: Extracting information' % id_or_name)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self.to_screen(u'%s: Downloading webpage' % video_id)
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self.to_screen(u'Confirming age')
+
+    # Methods for following #608
+    # They set the correct value of the '_type' key
+    def video_result(self, video_info):
+        """Returns a video"""
+        video_info['_type'] = 'video'
+        return video_info
+    def url_result(self, url, ie=None):
+        """Returns a url that points to a page that should be processed"""
+        #TODO: ie should be the class used for getting the info
+        video_info = {'_type': 'url',
+                      'url': url,
+                      'ie_key': ie}
+        return video_info
+    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+        """Returns a playlist"""
+        video_info = {'_type': 'playlist',
+                      'entries': entries}
+        if playlist_id:
+            video_info['id'] = playlist_id
+        if playlist_title:
+            video_info['title'] = playlist_title
+        return video_info
+
+    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+        """
+        Perform a regex search on the given string, using a single pattern or a
+        list of patterns, and return the first matching group.
+        On failure, return the default value if given; otherwise emit a warning
+        or raise an ExtractorError (depending on fatal), naming the missing field.
+        """
+        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
+            mobj = re.search(pattern, string, flags)
+        else:
+            for p in pattern:
+                mobj = re.search(p, string, flags)
+                if mobj: break
+
+        if sys.stderr.isatty() and os.name != 'nt':
+            _name = u'\033[0;34m%s\033[0m' % name
+        else:
+            _name = name
+
+        if mobj:
+            # return the first matching group
+            return next(g for g in mobj.groups() if g is not None)
+        elif default is not None:
+            return default
+        elif fatal:
+            raise ExtractorError(u'Unable to extract %s' % _name)
+        else:
+            self._downloader.report_warning(u'unable to extract %s; '
+                u'please report this issue on GitHub.' % _name)
+            return None
+
+    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+        """
+        Like _search_regex, but strips HTML tags and unescapes entities.
+        """
+        res = self._search_regex(pattern, string, name, default, fatal, flags)
+        if res:
+            return clean_html(res).strip()
+        else:
+            return res
+
+class SearchInfoExtractor(InfoExtractor):
+    """
+    Base class for paged search queries extractors.
+    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
+    Instances should define _SEARCH_KEY and _MAX_RESULTS.
+    """
+
+    @classmethod
+    def _make_valid_url(cls):
+        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
+
+    @classmethod
+    def suitable(cls, url):
+        return re.match(cls._make_valid_url(), url) is not None
+
+    def _real_extract(self, query):
+        mobj = re.match(self._make_valid_url(), query)
+        if mobj is None:
+            raise ExtractorError(u'Invalid search query "%s"' % query)
+
+        prefix = mobj.group('prefix')
+        query = mobj.group('query')
+        if prefix == '':
+            return self._get_n_results(query, 1)
+        elif prefix == 'all':
+            return self._get_n_results(query, self._MAX_RESULTS)
+        else:
+            n = int(prefix)
+            if n <= 0:
+                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+            elif n > self._MAX_RESULTS:
+                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+                n = self._MAX_RESULTS
+            return self._get_n_results(query, n)
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+        raise NotImplementedError("This method must be implemented by subclasses")
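
The class docstring above spells out the extractor contract: declare a _VALID_URL regexp, override _real_extract(), and return a list of info dictionaries containing at least id, url, title and ext. A minimal, purely hypothetical subclass following that contract could look like this (ExampleIE, its URL pattern and the page regexes are invented for illustration):

    import re

    from .common import InfoExtractor


    class ExampleIE(InfoExtractor):
        """Hypothetical extractor illustrating the InfoExtractor contract."""
        _VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/videos/(?P<id>[0-9]+)'

        def _real_extract(self, url):
            mobj = re.match(self._VALID_URL, url)
            video_id = mobj.group('id')
            webpage = self._download_webpage(url, video_id)

            title = self._html_search_regex(r'<title>(.*?)</title>',
                webpage, u'title')
            video_url = self._search_regex(r'file:\s*"(http[^"]+\.mp4)"',
                webpage, u'video URL')

            # _real_extract() must return a *list* of info dictionaries
            return [{
                'id':    video_id,
                'url':   video_url,
                'title': title,
                'ext':   'mp4',
            }]
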
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
new file mode 100644
index 0000000..34306b0
--- /dev/null
+++ b/youtube_dl/extractor/dailymotion.py
@@ -0,0 +1,77 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_request,
+    compat_urllib_parse,
+
+    ExtractorError,
+    unescapeHTML,
+)
+
+class DailymotionIE(InfoExtractor):
+    """Information Extractor for Dailymotion"""
+
+    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
+    IE_NAME = u'dailymotion'
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group(1).split('_')[0].split('?')[0]
+
+        video_extension = 'mp4'
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        request.add_header('Cookie', 'family_filter=off')
+        webpage = self._download_webpage(request, video_id)
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract media URL')
+        flashvars = compat_urllib_parse.unquote(mobj.group(1))
+
+        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
+            if key in flashvars:
+                max_quality = key
+                self.to_screen(u'Using %s' % key)
+                break
+        else:
+            raise ExtractorError(u'Unable to extract video URL')
+
+        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video URL')
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
+
+        # TODO: support choosing qualities
+
+        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = unescapeHTML(mobj.group('title'))
+
+        video_uploader = None
+        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
+                                             # Looking for official user
+                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
+                                            webpage, 'video uploader')
+
+        video_upload_date = None
+        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
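
The quality selection above leans on Python's for/else: the else branch runs only when the loop never hit break, i.e. none of the known quality keys was present in flashvars. The same pattern in isolation, with a made-up flashvars string:

    flashvars = '"hqURL":"http:\\/\\/example.invalid\\/video_hq.mp4","sdURL":"..."'  # invented sample

    for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
        if key in flashvars:
            max_quality = key   # keys are ordered best to worst, so the first hit wins
            print('Using %s' % key)
            break
    else:
        raise ValueError('Unable to extract video URL')
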
diff --git a/youtube_dl/extractor/depositfiles.py b/youtube_dl/extractor/depositfiles.py
new file mode 100644
index 0000000..d433489
--- /dev/null
+++ b/youtube_dl/extractor/depositfiles.py
@@ -0,0 +1,60 @@
+import re
+import os
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class DepositFilesIE(InfoExtractor):
+    """Information extractor for depositfiles.com"""
+
+    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
+
+    def _real_extract(self, url):
+        file_id = url.split('/')[-1]
+        # Rebuild the URL with the English locale
+        url = 'http://depositfiles.com/en/files/' + file_id
+
+        # Retrieve file webpage with 'Free download' button pressed
+        free_download_indication = { 'gateway_result' : '1' }
+        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
+        try:
+            self.report_download_webpage(file_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))
+
+        # Search for the real file URL
+        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
+        if (mobj is None) or (mobj.group(1) is None):
+            # Try to figure out the reason for the error.
+            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+            if (mobj is not None) and (mobj.group(1) is not None):
+                restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
+                raise ExtractorError(u'%s' % restriction_message)
+            else:
+                raise ExtractorError(u'Unable to extract download URL from: %s' % url)
+
+        file_url = mobj.group(1)
+        file_extension = os.path.splitext(file_url)[1][1:]
+
+        # Search for file title
+        file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')
+
+        return [{
+            'id':       file_id.decode('utf-8'),
+            'url':      file_url.decode('utf-8'),
+            'uploader': None,
+            'upload_date':  None,
+            'title':    file_title,
+            'ext':      file_extension.decode('utf-8'),
+        }]
diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py
new file mode 100644
index 0000000..c3d4343
--- /dev/null
+++ b/youtube_dl/extractor/eighttracks.py
@@ -0,0 +1,51 @@
+import itertools
+import json
+import random
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class EightTracksIE(InfoExtractor):
+    IE_NAME = '8tracks'
+    _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        playlist_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
+        data = json.loads(json_like)
+
+        session = str(random.randint(0, 1000000000))
+        mix_id = data['id']
+        track_count = data['tracks_count']
+        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
+        next_url = first_url
+        res = []
+        for i in itertools.count():
+            api_json = self._download_webpage(next_url, playlist_id,
+                note=u'Downloading song information %s/%s' % (str(i+1), track_count),
+                errnote=u'Failed to download song information')
+            api_data = json.loads(api_json)
+            track_data = api_data[u'set']['track']
+            info = {
+                'id': track_data['id'],
+                'url': track_data['track_file_stream_url'],
+                'title': track_data['performer'] + u' - ' + track_data['name'],
+                'raw_title': track_data['name'],
+                'uploader_id': data['user']['login'],
+                'ext': 'm4a',
+            }
+            res.append(info)
+            if api_data['set']['at_last_track']:
+                break
+            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
+        return res
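
The loop above walks the mix one track at a time: each response names the track to request next, and iteration stops once the API sets at_last_track. The same loop shape in isolation, with the HTTP/JSON call replaced by a stub (fetch_json and the canned data are invented):

    import itertools

    def fetch_json(url):
        # Stand-in for _download_webpage() + json.loads(); returns canned data here
        return {'set': {'track': {'id': 1, 'name': 'demo'}, 'at_last_track': True}}

    next_url = 'http://8tracks.com/sets/SESSION/play?player=sm&mix_id=MIX&format=jsonh'
    tracks = []
    for i in itertools.count():
        api_data = fetch_json(next_url)
        track = api_data['set']['track']
        tracks.append(track)
        if api_data['set']['at_last_track']:
            break
        next_url = ('http://8tracks.com/sets/SESSION/next?player=sm'
                    '&mix_id=MIX&format=jsonh&track_id=%s' % track['id'])
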
diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py
new file mode 100644
index 0000000..86b145b
--- /dev/null
+++ b/youtube_dl/extractor/escapist.py
@@ -0,0 +1,68 @@
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+
+class EscapistIE(InfoExtractor):
+    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        showName = mobj.group('showname')
+        videoId = mobj.group('episode')
+
+        self.report_extraction(videoId)
+        webpage = self._download_webpage(url, videoId)
+
+        videoDesc = self._html_search_regex('<meta name="description" content="([^"]*)"',
+            webpage, u'description', fatal=False)
+
+        imgUrl = self._html_search_regex('<meta property="og:image" content="([^"]*)"',
+            webpage, u'thumbnail', fatal=False)
+
+        playerUrl = self._html_search_regex('<meta property="og:video" content="([^"]*)"',
+            webpage, u'player url')
+
+        title = self._html_search_regex('<meta name="title" content="([^"]*)"',
+            webpage, u'title').split(' : ')[-1]
+
+        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config url')
+        configUrl = compat_urllib_parse.unquote(configUrl)
+
+        configJSON = self._download_webpage(configUrl, videoId,
+                                            u'Downloading configuration',
+                                            u'unable to download configuration')
+
+        # Technically, it's JavaScript, not JSON
+        configJSON = configJSON.replace("'", '"')
+
+        try:
+            config = json.loads(configJSON)
+        except (ValueError,) as err:
+            raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
+
+        playlist = config['playlist']
+        videoUrl = playlist[1]['url']
+
+        info = {
+            'id': videoId,
+            'url': videoUrl,
+            'uploader': showName,
+            'upload_date': None,
+            'title': title,
+            'ext': 'mp4',
+            'thumbnail': imgUrl,
+            'description': videoDesc,
+            'player_url': playerUrl,
+        }
+
+        return [info]
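
Two details above are worth spelling out: the config URL is carried as a percent-encoded config= parameter inside the og:video player URL, and the downloaded file uses single quotes, so it is JavaScript rather than strict JSON and must be patched before json.loads(). A tiny sketch of both steps with invented sample data:

    import json

    try:
        from urllib.parse import unquote   # Python 3
    except ImportError:
        from urllib import unquote         # Python 2

    # Made-up player URL of the shape the extractor expects
    playerUrl = 'http://www.escapistmagazine.com/player.swf?config=http%3A%2F%2Fexample.invalid%2Fconfig%3Fid%3D42'
    configUrl = unquote(playerUrl.split('config=', 1)[1])

    # Invented config body: single quotes, hence the replace() before parsing
    configJSON = "{'playlist': [{'url': 'intro.mp4'}, {'url': 'episode.mp4'}]}"
    config = json.loads(configJSON.replace("'", '"'))
    videoUrl = config['playlist'][1]['url']   # second playlist entry, as in the extractor
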
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
new file mode 100644
index 0000000..c694f9a
--- /dev/null
+++ b/youtube_dl/extractor/facebook.py
@@ -0,0 +1,111 @@
+import json
+import netrc
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class FacebookIE(InfoExtractor):
+    """Information Extractor for Facebook"""
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+    _NETRC_MACHINE = 'facebook'
+    IE_NAME = u'facebook'
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self.to_screen(u'Logging in')
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        useremail = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            useremail = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    useremail = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
+                return
+
+        if useremail is None:
+            return
+
+        # Log in
+        login_form = {
+            'email': useremail,
+            'pass': password,
+            'login': 'Log+In'
+            }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read()
+            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                self._downloader.report_warning(u'unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+                return
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
+            return
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('ID')
+
+        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+
+        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
+        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
+        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
+        if not m:
+            raise ExtractorError(u'Cannot parse data')
+        data = dict(json.loads(m.group(1)))
+        params_raw = compat_urllib_parse.unquote(data['params'])
+        params = json.loads(params_raw)
+        video_data = params['video_data'][0]
+        video_url = video_data.get('hd_src')
+        if not video_url:
+            video_url = video_data['sd_src']
+        if not video_url:
+            raise ExtractorError(u'Cannot find video URL')
+        video_duration = int(video_data['video_duration'])
+        thumbnail = video_data['thumbnail_src']
+
+        video_title = self._html_search_regex('<h2 class="uiHeaderTitle">([^<]+)</h2>',
+            webpage, u'title')
+
+        info = {
+            'id': video_id,
+            'title': video_title,
+            'url': video_url,
+            'ext': 'mp4',
+            'duration': video_duration,
+            'thumbnail': thumbnail,
+        }
+        return [info]
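
_real_initialize() above obtains credentials either from the --username/--password options or from a ~/.netrc entry for the 'facebook' machine. A small sketch of just the .netrc branch (the machine name is the one from the diff; the surrounding script is illustrative):

    import netrc

    NETRC_MACHINE = 'facebook'

    try:
        info = netrc.netrc().authenticators(NETRC_MACHINE)
    except (IOError, netrc.NetrcParseError) as err:
        info = None
        print('warning: parsing .netrc: %s' % err)

    if info is not None:
        useremail, _account, password = info
        print('found credentials for %s' % useremail)
    else:
        print('no authenticators for %s' % NETRC_MACHINE)

The matching ~/.netrc line would look like 'machine facebook login me@example.com password secret'.
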
diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py
new file mode 100644
index 0000000..791d5b6
--- /dev/null
+++ b/youtube_dl/extractor/flickr.py
@@ -0,0 +1,57 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    unescapeHTML,
+)
+
+
+class FlickrIE(InfoExtractor):
+    """Information Extractor for Flickr videos"""
+    _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        video_uploader_id = mobj.group('uploader_id')
+        webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')
+
+        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
+        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
+
+        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
+            first_xml, u'node_id')
+
+        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
+        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
+
+        self.report_extraction(video_id)
+
+        mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video url')
+        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
+
+        video_title = self._html_search_regex(r'<meta property="og:title" content=(?:"([^"]+)"|\'([^\']+)\')',
+            webpage, u'video title')
+
+        video_description = self._html_search_regex(r'<meta property="og:description" content=(?:"([^"]+)"|\'([^\']+)\')',
+            webpage, u'description', fatal=False)
+
+        thumbnail = self._html_search_regex(r'<meta property="og:image" content=(?:"([^"]+)"|\'([^\']+)\')',
+            webpage, u'thumbnail', fatal=False)
+
+        return [{
+            'id':          video_id,
+            'url':         video_url,
+            'ext':         'mp4',
+            'title':       video_title,
+            'description': video_description,
+            'thumbnail':   thumbnail,
+            'uploader_id': video_uploader_id,
+        }]
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
new file mode 100644
index 0000000..3045978
--- /dev/null
+++ b/youtube_dl/extractor/funnyordie.py
@@ -0,0 +1,31 @@
+import re
+
+from .common import InfoExtractor
+
+
+class FunnyOrDieIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"',
+            webpage, u'video URL', flags=re.DOTALL)
+
+        title = self._html_search_regex((r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>",
+            r'<title>(?P<title>[^<]+?)</title>'), webpage, 'title', flags=re.DOTALL)
+
+        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
+            webpage, u'description', fatal=False, flags=re.DOTALL)
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': video_description,
+        }
+        return [info]
diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py
new file mode 100644
index 0000000..33e59e8
--- /dev/null
+++ b/youtube_dl/extractor/gametrailers.py
@@ -0,0 +1,59 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+class GametrailersIE(InfoExtractor):
+    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('id')
+        video_type = mobj.group('type')
+        webpage = self._download_webpage(url, video_id)
+        if video_type == 'full-episodes':
+            mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
+        else:
+            mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
+        mgid = self._search_regex(mgid_re, webpage, u'mgid')
+        data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
+
+        info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
+                                           video_id, u'Downloading video info')
+        links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
+                                               video_id, u'Downloading video urls info')
+
+        self.report_extraction(video_id)
+        info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+                      <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+                      <image>.*
+                        <url>(?P<thumb>.*?)</url>.*
+                      </image>'''
+
+        m_info = re.search(info_re, info_page, re.VERBOSE|re.DOTALL)
+        if m_info is None:
+            raise ExtractorError(u'Unable to extract video info')
+        video_title = m_info.group('title')
+        video_description = m_info.group('description')
+        video_thumb = m_info.group('thumb')
+
+        m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
+        if m_urls is None or len(m_urls) == 0:
+            raise ExtractorError(u'Unable to extract video url')
+        # They are sorted from worst to best quality
+        video_url = m_urls[-1].group('url')
+
+        return {'url':         video_url,
+                'id':          video_id,
+                'title':       video_title,
+                # Videos are actually flv not mp4
+                'ext':         'flv',
+                'thumbnail':   video_thumb,
+                'description': video_description,
+                }
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
new file mode 100644
index 0000000..7a877b3
--- /dev/null
+++ b/youtube_dl/extractor/generic.py
@@ -0,0 +1,151 @@
+import os
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+class GenericIE(InfoExtractor):
+    """Generic last-resort information extractor."""
+
+    _VALID_URL = r'.*'
+    IE_NAME = u'generic'
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        if not self._downloader.params.get('test', False):
+            self._downloader.report_warning(u'Falling back on generic information extractor.')
+        super(GenericIE, self).report_download_webpage(video_id)
+
+    def report_following_redirect(self, new_url):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
+
+    def _test_redirect(self, url):
+        """Check if it is a redirect, like url shorteners, in case return the new url."""
+        class HeadRequest(compat_urllib_request.Request):
+            def get_method(self):
+                return "HEAD"
+
+        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
+            """
+            Subclass the HTTPRedirectHandler to make it use our
+            HeadRequest also on the redirected URL
+            """
+            def redirect_request(self, req, fp, code, msg, headers, newurl):
+                if code in (301, 302, 303, 307):
+                    newurl = newurl.replace(' ', '%20')
+                    newheaders = dict((k,v) for k,v in req.headers.items()
+                                      if k.lower() not in ("content-length", "content-type"))
+                    return HeadRequest(newurl,
+                                       headers=newheaders,
+                                       origin_req_host=req.get_origin_req_host(),
+                                       unverifiable=True)
+                else:
+                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
+            """
+            Fallback to GET if HEAD is not allowed (405 HTTP error)
+            """
+            def http_error_405(self, req, fp, code, msg, headers):
+                fp.read()
+                fp.close()
+
+                newheaders = dict((k,v) for k,v in req.headers.items()
+                                  if k.lower() not in ("content-length", "content-type"))
+                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
+                                                 headers=newheaders,
+                                                 origin_req_host=req.get_origin_req_host(),
+                                                 unverifiable=True))
+
+        # Build our opener
+        opener = compat_urllib_request.OpenerDirector()
+        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
+                        HTTPMethodFallback, HEADRedirectHandler,
+                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+            opener.add_handler(handler())
+
+        response = opener.open(HeadRequest(url))
+        if response is None:
+            raise ExtractorError(u'Invalid URL protocol')
+        new_url = response.geturl()
+
+        if url == new_url:
+            return False
+
+        self.report_following_redirect(new_url)
+        return new_url
+
+    def _real_extract(self, url):
+        new_url = self._test_redirect(url)
+        if new_url: return [self.url_result(new_url)]
+
+        video_id = url.split('/')[-1]
+        try:
+            webpage = self._download_webpage(url, video_id)
+        except ValueError:
+            # since this is the last-resort InfoExtractor, if
+            # this error is thrown, it'll be thrown here
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        self.report_extraction(video_id)
+        # Start with something easy: JW Player in SWFObject
+        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit
+            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit: JWPlayer JS loader
+            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Try to find twitter cards info
+            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+        if mobj is None:
+            # We look for Open Graph info:
+            # We have to match any number of spaces between elements; some sites try to align them (e.g. statigr.am)
+            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
+            # We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
+            if m_video_type is not None:
+                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # It's possible that one of the regexes
+        # matched, but returned an empty group:
+        if mobj.group(1) is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1))
+        video_id = os.path.basename(video_url)
+
+        # here's a fun little line of code for you:
+        video_extension = os.path.splitext(video_id)[1][1:]
+        video_id = os.path.splitext(video_id)[0]
+
+        # it's tempting to parse this further, but you would
+        # have to take into account all the variations like
+        #   Video Title - Site Name
+        #   Site Name | Video Title
+        #   Video Title - Tagline | Site Name
+        # and so on and so forth; it's just not practical
+        video_title = self._html_search_regex(r'<title>(.*)</title>',
+            webpage, u'video title')
+
+        # video uploader is domain name
+        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
+            url, u'video uploader')
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
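
_test_redirect() above builds a custom opener so that redirects are followed with HEAD requests (falling back to GET via HTTPMethodFallback when a server answers 405). A much shorter, Python 3-only sketch that is not equivalent to the handler chain above but shows the HEAD-then-compare idea:

    import urllib.request

    def resolve_redirect(url):
        # HEAD avoids downloading the body; the default handlers follow 3xx responses
        req = urllib.request.Request(url, method='HEAD')
        with urllib.request.urlopen(req) as resp:
            final_url = resp.geturl()
        return None if final_url == url else final_url
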
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
new file mode 100644
index 0000000..e922bd1
--- /dev/null
+++ b/youtube_dl/extractor/googleplus.py
@@ -0,0 +1,82 @@
+import datetime
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class GooglePlusIE(InfoExtractor):
+    """Information extractor for plus.google.com."""
+
+    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
+    IE_NAME = u'plus.google'
+
+    def _real_extract(self, url):
+        # Extract id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        post_url = mobj.group(0)
+        video_id = mobj.group(1)
+
+        video_extension = 'flv'
+
+        # Step 1, Retrieve post webpage to extract further information
+        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
+
+        self.report_extraction(video_id)
+
+        # Extract update date
+        upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>',
+            webpage, u'upload date', fatal=False)
+        if upload_date:
+            # Convert timestring to a format suitable for filename
+            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
+            upload_date = upload_date.strftime('%Y%m%d')
+
+        # Extract uploader
+        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
+            webpage, u'uploader', fatal=False)
+
+        # Extract title
+        # Get the first line for title
+        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
+            webpage, 'title', default=u'NA')
+
+        # Step 2: Simulate clicking the image box to launch the video
+        video_page = self._search_regex('"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]',
+            webpage, u'video page URL')
+        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
+
+        # Extract video links of all sizes from the video page
+        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
+        mobj = re.findall(pattern, webpage)
+        if len(mobj) == 0:
+            raise ExtractorError(u'Unable to extract video links')
+
+        # Sort by resolution
+        links = sorted(mobj)
+
+        # Take the last entry of the sorted list, i.e. the highest resolution
+        video_url = links[-1]
+        # Keep only the URL; the resolution part of the tuple is no longer needed
+        video_url = video_url[-1]
+        # Decode escaped \u0026-style sequences
+        try:
+            video_url = video_url.decode("unicode_escape")
+        except AttributeError: # Python 3
+            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': uploader,
+            'upload_date':  upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
diff --git a/youtube_dl/extractor/googlesearch.py b/youtube_dl/extractor/googlesearch.py
new file mode 100644
index 0000000..21c240e
--- /dev/null
+++ b/youtube_dl/extractor/googlesearch.py
@@ -0,0 +1,39 @@
+import itertools
+import re
+
+from .common import SearchInfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+)
+
+
+class GoogleSearchIE(SearchInfoExtractor):
+    """Information Extractor for Google Video search queries."""
+    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    _MAX_RESULTS = 1000
+    IE_NAME = u'video.google:search'
+    _SEARCH_KEY = 'gvsearch'
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
+
+        for pagenum in itertools.count(1):
+            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
+            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
+                                             note='Downloading result page ' + str(pagenum))
+
+            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
+                e = {
+                    '_type': 'url',
+                    'url': mobj.group(1)
+                }
+                res['entries'].append(e)
+
+            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+                return res
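
GoogleSearchIE inherits its query parsing from SearchInfoExtractor in common.py: the text between the search key and the colon decides how many results to fetch. A small demonstration of that mapping (the regex and _SEARCH_KEY come from the diff; the driver loop is illustrative):

    import re

    _SEARCH_KEY = 'gvsearch'
    _MAX_RESULTS = 1000
    valid_url = r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % _SEARCH_KEY

    for query in ['gvsearch:cats', 'gvsearch5:cats', 'gvsearchall:cats']:
        mobj = re.match(valid_url, query)
        prefix = mobj.group('prefix')
        if prefix == '':
            n = 1                           # bare key: first result only
        elif prefix == 'all':
            n = _MAX_RESULTS                # capped at _MAX_RESULTS
        else:
            n = min(int(prefix), _MAX_RESULTS)
        print('%s -> %d result(s) for %r' % (query, n, mobj.group('query')))
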
diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py
new file mode 100644
index 0000000..7b94f85
--- /dev/null
+++ b/youtube_dl/extractor/howcast.py
@@ -0,0 +1,37 @@
+import re
+
+from .common import InfoExtractor
+
+
+class HowcastIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage_url = 'http://www.howcast.com/videos/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        self.report_extraction(video_id)
+
+        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
+            webpage, u'video URL')
+
+        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
+            webpage, u'title')
+
+        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
+            webpage, u'description', fatal=False)
+
+        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
+            webpage, u'thumbnail', fatal=False)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      'mp4',
+            'title':    video_title,
+            'description': video_description,
+            'thumbnail': thumbnail,
+        }]
diff --git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py
new file mode 100644
index 0000000..ceec4f6
--- /dev/null
+++ b/youtube_dl/extractor/hypem.py
@@ -0,0 +1,63 @@
+import json
+import re
+import time
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class HypemIE(InfoExtractor):
+    """Information Extractor for hypem"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        track_id = mobj.group(1)
+
+        data = { 'ax': 1, 'ts': time.time() }
+        data_encoded = compat_urllib_parse.urlencode(data)
+        complete_url = url + "?" + data_encoded
+        request = compat_urllib_request.Request(complete_url)
+        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
+        cookie = urlh.headers.get('Set-Cookie', '')
+
+        self.report_extraction(track_id)
+
+        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
+            response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
+        try:
+            track_list = json.loads(html_tracks)
+            track = track_list[u'tracks'][0]
+        except ValueError:
+            raise ExtractorError(u'Hypemachine contained invalid JSON.')
+
+        key = track[u"key"]
+        track_id = track[u"id"]
+        artist = track[u"artist"]
+        title = track[u"song"]
+
+        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
+        request = compat_urllib_request.Request(serve_url, "" , {'Content-Type': 'application/json'})
+        request.add_header('cookie', cookie)
+        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
+        try:
+            song_data = json.loads(song_data_json)
+        except ValueError:
+            raise ExtractorError(u'Hypemachine contained invalid JSON.')
+        final_url = song_data[u"url"]
+
+        return [{
+            'id':       track_id,
+            'url':      final_url,
+            'ext':      "mp3",
+            'title':    title,
+            'artist':   artist,
+        }]
\ No newline at end of file
diff --git a/youtube_dl/extractor/ina.py b/youtube_dl/extractor/ina.py
new file mode 100644
index 0000000..c19b956
--- /dev/null
+++ b/youtube_dl/extractor/ina.py
@@ -0,0 +1,31 @@
+import re
+
+from .common import InfoExtractor
+
+
+class InaIE(InfoExtractor):
+    """Information Extractor for Ina.fr"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        mrss_url='http://player.ina.fr/notices/%s.mrss' % video_id
+        video_extension = 'mp4'
+        webpage = self._download_webpage(mrss_url, video_id)
+
+        self.report_extraction(video_id)
+
+        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
+            webpage, u'video URL')
+
+        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
+            webpage, u'title')
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      video_extension,
+            'title':    video_title,
+        }]
diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py
new file mode 100644
index 0000000..9056742
--- /dev/null
+++ b/youtube_dl/extractor/infoq.py
@@ -0,0 +1,50 @@
+import base64
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+
+class InfoQIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        webpage = self._download_webpage(url, video_id=url)
+        self.report_extraction(url)
+
+        # Extract video URL
+        mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video url')
+        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
+        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
+
+        # Extract title
+        video_title = self._search_regex(r'contentTitle = "(.*?)";',
+            webpage, u'title')
+
+        # Extract description
+        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
+            webpage, u'description', fatal=False)
+
+        video_filename = video_url.split('/')[-1]
+        video_id, extension = video_filename.split('.')
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': extension, # Extension is always(?) mp4, but seems to be flv
+            'thumbnail': None,
+            'description': video_description,
+        }
+
+        return [info]
\ No newline at end of file
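
The InfoQ page stores the stream path base64-encoded in a jsclassref variable; the extractor decodes it, URL-unquotes it and prepends the RTMP application URL. A worked miniature of that step (the encoded sample is invented; the rtmpe prefix is the one from the diff):

    import base64

    try:
        from urllib.parse import unquote   # Python 3
    except ImportError:
        from urllib import unquote         # Python 2

    # Invented value standing in for what the jsclassref regex would capture
    jsclassref = base64.b64encode(b'presentations/13-example-talk.mp4').decode('ascii')

    real_id = unquote(base64.b64decode(jsclassref.encode('ascii')).decode('utf-8'))
    video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
    print(video_url)   # rtmpe://video.infoq.com/cfx/st/presentations/13-example-talk.mp4
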
diff --git a/youtube_dl/extractor/justintv.py b/youtube_dl/extractor/justintv.py
new file mode 100644
index 0000000..b2006e3
--- /dev/null
+++ b/youtube_dl/extractor/justintv.py
@@ -0,0 +1,144 @@
+import json
+import os
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    formatSeconds,
+)
+
+
+class JustinTVIE(InfoExtractor):
+    """Information extractor for justin.tv and twitch.tv"""
+    # TODO: One broadcast may be split into multiple videos. The key
+    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+    # starts at 1 and increases. Can we treat all parts as one video?
+
+    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
+        (?:
+            (?P<channelid>[^/]+)|
+            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
+            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
+        )
+        /?(?:\#.*)?$
+        """
+    _JUSTIN_PAGE_LIMIT = 100
+    IE_NAME = u'justin.tv'
+
+    def report_download_page(self, channel, offset):
+        """Report attempt to download a single page of videos."""
+        self.to_screen(u'%s: Downloading video information from %d to %d' %
+                (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
+
+    # Return count of items, list of *valid* items
+    def _parse_page(self, url, video_id):
+        info_json = self._download_webpage(url, video_id,
+                                           u'Downloading video info JSON',
+                                           u'unable to download video info JSON')
+
+        response = json.loads(info_json)
+        if type(response) != list:
+            error_text = response.get('error', 'unknown error')
+            raise ExtractorError(u'Justin.tv API: %s' % error_text)
+        info = []
+        for clip in response:
+            video_url = clip['video_file_url']
+            if video_url:
+                video_extension = os.path.splitext(video_url)[1][1:]
+                video_date = re.sub('-', '', clip['start_time'][:10])
+                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
+                video_id = clip['id']
+                video_title = clip.get('title', video_id)
+                info.append({
+                    'id': video_id,
+                    'url': video_url,
+                    'title': video_title,
+                    'uploader': clip.get('channel_name', video_uploader_id),
+                    'uploader_id': video_uploader_id,
+                    'upload_date': video_date,
+                    'ext': video_extension,
+                })
+        return (len(response), info)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'invalid URL: %s' % url)
+
+        api_base = 'http://api.justin.tv'
+        paged = False
+        if mobj.group('channelid'):
+            paged = True
+            video_id = mobj.group('channelid')
+            api = api_base + '/channel/archives/%s.json' % video_id
+        elif mobj.group('chapterid'):
+            chapter_id = mobj.group('chapterid')
+
+            webpage = self._download_webpage(url, chapter_id)
+            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
+            if not m:
+                raise ExtractorError(u'Cannot find archive of a chapter')
+            archive_id = m.group(1)
+
+            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+            chapter_info_xml = self._download_webpage(api, chapter_id,
+                                             note=u'Downloading chapter information',
+                                             errnote=u'Chapter information download failed')
+            doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
+            for a in doc.findall('.//archive'):
+                if archive_id == a.find('./id').text:
+                    break
+            else:
+                raise ExtractorError(u'Could not find chapter in chapter information')
+
+            video_url = a.find('./video_file_url').text
+            video_ext = video_url.rpartition('.')[2] or u'flv'
+
+            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
+            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
+                                   note='Downloading chapter metadata',
+                                   errnote='Download of chapter metadata failed')
+            chapter_info = json.loads(chapter_info_json)
+
+            bracket_start = int(doc.find('.//bracket_start').text)
+            bracket_end = int(doc.find('.//bracket_end').text)
+
+            # TODO determine start (and probably fix up file)
+            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+            #video_url += u'?start=' + TODO:start_timestamp
+            # bracket_start is 13290, but we want 51670615
+            self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
+                                            u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
+
+            info = {
+                'id': u'c' + chapter_id,
+                'url': video_url,
+                'ext': video_ext,
+                'title': chapter_info['title'],
+                'thumbnail': chapter_info['preview'],
+                'description': chapter_info['description'],
+                'uploader': chapter_info['channel']['display_name'],
+                'uploader_id': chapter_info['channel']['name'],
+            }
+            return [info]
+        else:
+            video_id = mobj.group('videoid')
+            api = api_base + '/broadcast/by_archive/%s.json' % video_id
+
+        self.report_extraction(video_id)
+
+        info = []
+        offset = 0
+        limit = self._JUSTIN_PAGE_LIMIT
+        while True:
+            if paged:
+                self.report_download_page(video_id, offset)
+            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
+            page_count, page_info = self._parse_page(page_url, video_id)
+            info.extend(page_info)
+            if not paged or page_count != limit:
+                break
+            offset += limit
+        return info
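
For channel archives the extractor above pages through the Justin.tv API with offset/limit parameters and stops as soon as a page comes back short. The same loop shape in isolation, with the API request stubbed out (fetch_page and the pretend archive size are invented):

    PAGE_LIMIT = 100

    def fetch_page(offset, limit):
        # Stand-in for the JSON request; pretend the archive holds 250 clips
        total = 250
        return list(range(offset, min(offset + limit, total)))

    clips = []
    offset = 0
    while True:
        page = fetch_page(offset, PAGE_LIMIT)
        clips.extend(page)
        if len(page) != PAGE_LIMIT:   # a short page means the archive is exhausted
            break
        offset += PAGE_LIMIT

    print(len(clips))   # 250
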
diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py
new file mode 100644
index 0000000..e2093a0
--- /dev/null
+++ b/youtube_dl/extractor/keek.py
@@ -0,0 +1,32 @@
+import re
+
+from .common import InfoExtractor
+
+
+class KeekIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
+    IE_NAME = u'keek'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
+        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
+        webpage = self._download_webpage(url, video_id)
+
+        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
+            webpage, u'title')
+
+        uploader = self._html_search_regex(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
+            webpage, u'uploader', fatal=False)
+
+        info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': 'mp4',
+                'title': video_title,
+                'thumbnail': thumbnail,
+                'uploader': uploader
+        }
+        return [info]
diff --git a/youtube_dl/extractor/liveleak.py b/youtube_dl/extractor/liveleak.py
new file mode 100644
index 0000000..d4b142e
--- /dev/null
+++ b/youtube_dl/extractor/liveleak.py
@@ -0,0 +1,44 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class LiveLeakIE(InfoExtractor):
+
+    _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
+    IE_NAME = u'liveleak'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group('video_id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._search_regex(r'file: "(.*?)",',
+            webpage, u'video URL')
+
+        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
+            webpage, u'title').replace('LiveLeak.com -', '').strip()
+
+        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
+            webpage, u'description', fatal=False)
+
+        video_uploader = self._html_search_regex(r'By:.*?(\w+)</a>',
+            webpage, u'uploader', fatal=False)
+
+        info = {
+            'id':  video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': video_title,
+            'description': video_description,
+            'uploader': video_uploader
+        }
+
+        return [info]
diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py
new file mode 100644
index 0000000..66d6554
--- /dev/null
+++ b/youtube_dl/extractor/metacafe.py
@@ -0,0 +1,110 @@
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_parse_qs,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+    compat_str,
+
+    ExtractorError,
+)
+
+class MetacafeIE(InfoExtractor):
+    """Information Extractor for metacafe.com."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
+    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
+    IE_NAME = u'metacafe'
+
+    def report_disclaimer(self):
+        """Report disclaimer retrieval."""
+        self.to_screen(u'Retrieving disclaimer')
+
+    def _real_initialize(self):
+        # Retrieve disclaimer
+        request = compat_urllib_request.Request(self._DISCLAIMER)
+        try:
+            self.report_disclaimer()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))
+
+        # Confirm age
+        disclaimer_form = {
+            'filters': '0',
+            'submit': "Continue - I'm over 18",
+            }
+        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        try:
+            self.report_age_confirmation()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group(1)
+
+        # Check if video comes from YouTube
+        mobj2 = re.match(r'^yt-(.*)$', video_id)
+        if mobj2 is not None:
+            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
+
+        # Retrieve video webpage to extract further information
+        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+        if mobj is not None:
+            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+            video_extension = mediaURL[-3:]
+
+            # Extract gdaKey if available
+            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+            if mobj is None:
+                video_url = mediaURL
+            else:
+                gdaKey = mobj.group(1)
+                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+        else:
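+            # No plain mediaURL parameter in the page; fall back to the mediaData JSON embedded in the flashvars value.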
+            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            vardict = compat_parse_qs(mobj.group(1))
+            if 'mediaData' not in vardict:
+                raise ExtractorError(u'Unable to extract media URL')
+            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
+            if mobj is None:
+                raise ExtractorError(u'Unable to extract media URL')
+            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
+            video_extension = mediaURL[-3:]
+            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
+
+        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1).decode('utf-8')
+
+        mobj = re.search(r'submitter=(.*?);', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract uploader nickname')
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader.decode('utf-8'),
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
new file mode 100644
index 0000000..8245b55
--- /dev/null
+++ b/youtube_dl/extractor/mixcloud.py
@@ -0,0 +1,115 @@
+import json
+import re
+import socket
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class MixcloudIE(InfoExtractor):
+    _WORKING = False  # The site has switched to a new API, which looks usable: http://www.mixcloud.com/developers/documentation/
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
+    IE_NAME = u'mixcloud'
+
+    def report_download_json(self, file_id):
+        """Report JSON download."""
+        self.to_screen(u'Downloading json')
+
+    def get_urls(self, jsonData, fmt, bitrate='best'):
+        """Get urls from 'audio_formats' section in json"""
+        try:
+            bitrate_list = jsonData[fmt]
+            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
+                bitrate = max(bitrate_list) # select highest
+
+            url_list = jsonData[fmt][bitrate]
+        except TypeError: # we have no bitrate info.
+            url_list = jsonData[fmt]
+        return url_list
+
+    def check_urls(self, url_list):
+        """Returns 1st active url from list"""
+        for url in url_list:
+            try:
+                compat_urllib_request.urlopen(url)
+                return url
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error):
+                url = None
+
+        return None
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for fmt in formats.keys():
+            for b in formats[fmt]:
+                try:
+                    ext = formats[fmt][b][0]
+                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
+                except TypeError: # we have no bitrate info
+                    ext = formats[fmt][0]
+                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
+                    break
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        # extract uploader & filename from url
+        uploader = mobj.group(1).decode('utf-8')
+        file_id = uploader + "-" + mobj.group(2).decode('utf-8')
+
+        # construct API request
+        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
+        # retrieve .json file with links to files
+        request = compat_urllib_request.Request(file_url)
+        try:
+            self.report_download_json(file_url)
+            jsonData = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))
+
+        # parse JSON
+        json_data = json.loads(jsonData)
+        player_url = json_data['player_swf_url']
+        formats = dict(json_data['audio_formats'])
+
+        req_format = self._downloader.params.get('format', None)
+
+        if self._downloader.params.get('listformats', None):
+            self._print_formats(formats)
+            return
+
+        if req_format is None or req_format == 'best':
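+            # No explicit format requested: try each advertised format and keep the first one whose URL actually responds.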
+            for format_param in formats.keys():
+                url_list = self.get_urls(formats, format_param)
+                # check urls
+                file_url = self.check_urls(url_list)
+                if file_url is not None:
+                    break # got it!
+        else:
+            if req_format not in formats:
+                raise ExtractorError(u'Format is not available')
+
+            url_list = self.get_urls(formats, req_format)
+            file_url = self.check_urls(url_list)
+            format_param = req_format
+
+        return [{
+            'id': file_id.decode('utf-8'),
+            'url': file_url.decode('utf-8'),
+            'uploader': uploader.decode('utf-8'),
+            'upload_date': None,
+            'title': json_data['name'],
+            'ext': file_url.split('.')[-1].decode('utf-8'),
+            'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
+            'thumbnail': json_data['thumbnail_url'],
+            'description': json_data['description'],
+            'player_url': player_url.decode('utf-8'),
+        }]
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
new file mode 100644
index 0000000..a801c81
--- /dev/null
+++ b/youtube_dl/extractor/mtv.py
@@ -0,0 +1,72 @@
+import re
+import socket
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class MTVIE(InfoExtractor):
+    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
+    _WORKING = False
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        if not mobj.group('proto'):
+            url = 'http://' + url
+        video_id = mobj.group('videoid')
+
+        webpage = self._download_webpage(url, video_id)
+
+        #song_name = self._html_search_regex(r'<meta name="mtv_vt" content="([^"]+)"/>',
+        #    webpage, u'song name', fatal=False)
+
+        video_title = self._html_search_regex(r'<meta name="mtv_an" content="([^"]+)"/>',
+            webpage, u'title')
+
+        mtvn_uri = self._html_search_regex(r'<meta name="mtvn_uri" content="([^"]+)"/>',
+            webpage, u'mtvn_uri', fatal=False)
+
+        content_id = self._search_regex(r'MTVN.Player.defaultPlaylistId = ([0-9]+);',
+            webpage, u'content id', fatal=False)
+
+        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
+        self.report_extraction(video_id)
+        request = compat_urllib_request.Request(videogen_url)
+        try:
+            metadataXml = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err))
+
+        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
+        renditions = mdoc.findall('.//rendition')
+
+        # For now, always pick the highest quality.
+        rendition = renditions[-1]
+
+        try:
+            _,_,ext = rendition.attrib['type'].partition('/')
+            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
+            video_url = rendition.find('./src').text
+        except KeyError:
+            raise ExtractorError('Invalid rendition field.')
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'upload_date': None,
+            'title': video_title,
+            'ext': ext,
+            'format': format,
+        }
+
+        return [info]
diff --git a/youtube_dl/extractor/myspass.py b/youtube_dl/extractor/myspass.py
new file mode 100644
index 0000000..7b016bb
--- /dev/null
+++ b/youtube_dl/extractor/myspass.py
@@ -0,0 +1,64 @@
+import os.path
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+
+    ExtractorError,
+)
+
+
+class MySpassIE(InfoExtractor):
+    _VALID_URL = r'http://www.myspass.de/.*'
+
+    def _real_extract(self, url):
+        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
+
+        # video id is the last path element of the URL
+        # usually there is a trailing slash, so also try the second-to-last element
+        url_path = compat_urllib_parse_urlparse(url).path
+        url_parent_path, video_id = os.path.split(url_path)
+        if not video_id:
+            _, video_id = os.path.split(url_parent_path)
+
+        # get metadata
+        metadata_url = META_DATA_URL_TEMPLATE % video_id
+        metadata_text = self._download_webpage(metadata_url, video_id)
+        metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
+
+        # extract values from metadata
+        url_flv_el = metadata.find('url_flv')
+        if url_flv_el is None:
+            raise ExtractorError(u'Unable to extract download url')
+        video_url = url_flv_el.text
+        extension = os.path.splitext(video_url)[1][1:]
+        title_el = metadata.find('title')
+        if title_el is None:
+            raise ExtractorError(u'Unable to extract title')
+        title = title_el.text
+        format_id_el = metadata.find('format_id')
+        if format_id_el is None:
+            format = 'mp4'
+        else:
+            format = format_id_el.text
+        description_el = metadata.find('description')
+        if description_el is not None:
+            description = description_el.text
+        else:
+            description = None
+        imagePreview_el = metadata.find('imagePreview')
+        if imagePreview_el is not None:
+            thumbnail = imagePreview_el.text
+        else:
+            thumbnail = None
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': extension,
+            'format': format,
+            'thumbnail': thumbnail,
+            'description': description
+        }
+        return [info]
diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py
new file mode 100644
index 0000000..47a44e3
--- /dev/null
+++ b/youtube_dl/extractor/myvideo.py
@@ -0,0 +1,164 @@
+import binascii
+import base64
+import hashlib
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_ord,
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+
+
+class MyVideoIE(InfoExtractor):
+    """Information Extractor for myvideo.de."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
+    IE_NAME = u'myvideo'
+
+    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
+    # Released into the Public Domain by Tristan Fischer on 2013-05-19
+    # https://github.com/rg3/youtube-dl/pull/842
+    def __rc4crypt(self,data, key):
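+        # Plain RC4: the first loop performs the key scheduling, the second generates the keystream and XORs it with the data.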
+        x = 0
+        box = list(range(256))
+        for i in list(range(256)):
+            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
+            box[i], box[x] = box[x], box[i]
+        x = 0
+        y = 0
+        out = ''
+        for char in data:
+            x = (x + 1) % 256
+            y = (y + box[x]) % 256
+            box[x], box[y] = box[y], box[x]
+            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
+        return out
+
+    def __md5(self,s):
+        return hashlib.md5(s).hexdigest().encode()
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'invalid URL: %s' % url)
+
+        video_id = mobj.group(1)
+
+        GK = (
+          b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
+          b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
+          b'TnpsbA0KTVRkbU1tSTRNdz09'
+        )
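+        # GK is a twice base64-encoded secret; combined with the MD5 of the video id it yields the RC4 key (sk) computed below.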
+
+        # Get video webpage
+        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
+        if mobj is not None:
+            self.report_extraction(video_id)
+            video_url = mobj.group(1) + '.flv'
+
+            video_title = self._html_search_regex('<title>([^<]+)</title>',
+                webpage, u'title')
+
+            video_ext = self._search_regex('[.](.+?)$', video_url, u'extension')
+
+            return [{
+                'id':       video_id,
+                'url':      video_url,
+                'uploader': None,
+                'upload_date':  None,
+                'title':    video_title,
+                'ext':      video_ext,
+            }]
+
+        # try encxml
+        mobj = re.search('var flashvars={(.+?)}', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract video')
+
+        params = {}
+        encxml = ''
+        sec = mobj.group(1)
+        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
+            if not a == '_encxml':
+                params[a] = b
+            else:
+                encxml = compat_urllib_parse.unquote(b)
+        if not params.get('domain'):
+            params['domain'] = 'www.myvideo.de'
+        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
+        if 'flash_playertype=MTV' in xmldata_url:
+            self._downloader.report_warning(u'avoiding MTV player')
+            xmldata_url = (
+                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
+                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
+            ) % video_id
+
+        # get enc data
+        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
+        enc_data_b = binascii.unhexlify(enc_data)
+        sk = self.__md5(
+            base64.b64decode(base64.b64decode(GK)) +
+            self.__md5(
+                str(video_id).encode('utf-8')
+            )
+        )
+        dec_data = self.__rc4crypt(enc_data_b, sk)
+
+        # extracting infos
+        self.report_extraction(video_id)
+
+        video_url = None
+        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
+        if mobj:
+            video_url = compat_urllib_parse.unquote(mobj.group(1))
+            if 'myvideo2flash' in video_url:
+                self._downloader.report_warning(u'forcing RTMPT ...')
+                video_url = video_url.replace('rtmpe://', 'rtmpt://')
+
+        if not video_url:
+            # extract non rtmp videos
+            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
+            if mobj is None:
+                raise ExtractorError(u'unable to extract url')
+            video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))
+
+        video_file = self._search_regex('source=\'(.*?)\'', dec_data, u'video file')
+        video_file = compat_urllib_parse.unquote(video_file)
+
+        if not video_file.endswith('f4m'):
+            ppath, prefix = video_file.split('.')
+            video_playpath = '%s:%s' % (prefix, ppath)
+            video_hls_playlist = ''
+        else:
+            video_playpath = ''
+            video_hls_playlist = (
+                video_file
+            ).replace('.f4m', '.m3u8')
+
+        video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, u'swfobj')
+        video_swfobj = compat_urllib_parse.unquote(video_swfobj)
+
+        video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
+            webpage, u'title')
+
+        return [{
+            'id':                 video_id,
+            'url':                video_url,
+            'tc_url':             video_url,
+            'uploader':           None,
+            'upload_date':        None,
+            'title':              video_title,
+            'ext':                u'flv',
+            'play_path':          video_playpath,
+            'video_file':         video_file,
+            'video_hls_playlist': video_hls_playlist,
+            'player_url':         video_swfobj,
+        }]
+
diff --git a/youtube_dl/extractor/nba.py b/youtube_dl/extractor/nba.py
new file mode 100644
index 0000000..296d4cd
--- /dev/null
+++ b/youtube_dl/extractor/nba.py
@@ -0,0 +1,40 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class NBAIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group(1)
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
+
+        shortened_video_id = video_id.rpartition('/')[2]
+        title = self._html_search_regex(r'<meta property="og:title" content="(.*?)"',
+            webpage, 'title', default=shortened_video_id).replace('NBA.com: ', '')
+
+        # The upload date is not present in the HTML returned to us
+        # uploader_date = self._html_search_regex(r'<b>Date:</b> (.*?)</div>', webpage, 'upload_date', fatal=False)
+
+        description = self._html_search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)
+
+        info = {
+            'id': shortened_video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            # 'uploader_date': uploader_date,
+            'description': description,
+        }
+        return [info]
diff --git a/youtube_dl/extractor/photobucket.py b/youtube_dl/extractor/photobucket.py
new file mode 100644
index 0000000..cd7fe6f
--- /dev/null
+++ b/youtube_dl/extractor/photobucket.py
@@ -0,0 +1,66 @@
+import datetime
+import json
+import re
+
+from .common import InfoExtractor
+
+from ..utils import (
+    ExtractorError,
+)
+
+class PhotobucketIE(InfoExtractor):
+    """Information extractor for photobucket.com."""
+
+    # TODO: the original _VALID_URL was:
+    # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+    # Check whether it is still necessary to keep the old extraction process
+    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
+    IE_NAME = u'photobucket'
+
+    def _real_extract(self, url):
+        # Extract id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group('id')
+
+        video_extension = mobj.group('ext')
+
+        # Retrieve video webpage to extract further information
+        webpage = self._download_webpage(url, video_id)
+
+        # Extract URL, uploader, and title from webpage
+        self.report_extraction(video_id)
+        # First, try looking at the JavaScript code:
+        mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
+        if mobj is not None:
+            info = json.loads(mobj.group('json'))
+            return [{
+                'id':       video_id,
+                'url':      info[u'downloadUrl'],
+                'uploader': info[u'username'],
+                'upload_date':  datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
+                'title':    info[u'title'],
+                'ext':      video_extension,
+                'thumbnail': info[u'thumbUrl'],
+            }]
+
+        # We try looking in other parts of the webpage
+        video_url = self._search_regex(r'<link rel="video_src" href=".*\?file=([^"]+)" />',
+            webpage, u'video URL')
+
+        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract title')
+        video_title = mobj.group(1).decode('utf-8')
+        video_uploader = mobj.group(2).decode('utf-8')
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py
new file mode 100644
index 0000000..0adb40d
--- /dev/null
+++ b/youtube_dl/extractor/pornotube.py
@@ -0,0 +1,41 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    unified_strdate,
+)
+
+
+class PornotubeIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('videoid')
+        video_title = mobj.group('title')
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+
+        # Get the video URL
+        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
+        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
+        video_url = compat_urllib_parse.unquote(video_url)
+
+        # Get the upload date
+        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
+        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
+        if upload_date: upload_date = unified_strdate(upload_date)
+
+        info = {'id': video_id,
+                'url': video_url,
+                'uploader': None,
+                'upload_date': upload_date,
+                'title': video_title,
+                'ext': 'flv',
+                'format': 'flv'}
+
+        return [info]
diff --git a/youtube_dl/extractor/rbmaradio.py b/youtube_dl/extractor/rbmaradio.py
new file mode 100644
index 0000000..0c75eee
--- /dev/null
+++ b/youtube_dl/extractor/rbmaradio.py
@@ -0,0 +1,44 @@
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+
+    ExtractorError,
+)
+
+
+class RBMARadioIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        webpage = self._download_webpage(url, video_id)
+
+        json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
+            webpage, u'json data', flags=re.MULTILINE)
+
+        try:
+            data = json.loads(json_data)
+        except ValueError as e:
+            raise ExtractorError(u'Invalid JSON: ' + str(e))
+
+        video_url = data['akamai_url'] + '&cbr=256'
+        url_parts = compat_urllib_parse_urlparse(video_url)
+        video_ext = url_parts.path.rpartition('.')[2]
+        info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': video_ext,
+                'title': data['title'],
+                'description': data.get('teaser_text'),
+                'location': data.get('country_of_origin'),
+                'uploader': data.get('host', {}).get('name'),
+                'uploader_id': data.get('host', {}).get('slug'),
+                'thumbnail': data.get('image', {}).get('large_url_2x'),
+                'duration': data.get('duration'),
+        }
+        return [info]
diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
new file mode 100644
index 0000000..ebc4e23
--- /dev/null
+++ b/youtube_dl/extractor/redtube.py
@@ -0,0 +1,29 @@
+import re
+
+from .common import InfoExtractor
+
+
+class RedTubeIE(InfoExtractor):
+    _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        video_extension = 'mp4'
+        webpage = self._download_webpage(url, video_id)
+
+        self.report_extraction(video_id)
+
+        video_url = self._html_search_regex(r'<source src="(.+?)" type="video/mp4">',
+            webpage, u'video URL')
+
+        video_title = self._html_search_regex('<h1 class="videoTitle slidePanelMovable">(.+?)</h1>',
+            webpage, u'title')
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      video_extension,
+            'title':    video_title,
+        }]
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
new file mode 100644
index 0000000..80d7e1b
--- /dev/null
+++ b/youtube_dl/extractor/soundcloud.py
@@ -0,0 +1,129 @@
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+
+    ExtractorError,
+    unified_strdate,
+)
+
+
+class SoundcloudIE(InfoExtractor):
+    """Information extractor for soundcloud.com
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting from a URL composed
+       of the stream token and uid
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
+    IE_NAME = u'soundcloud'
+
+    def report_resolve(self, video_id):
+        """Report information extraction."""
+        self.to_screen(u'%s: Resolving id' % video_id)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1)
+        # extract simple title (uploader + slug of song title)
+        slug_title =  mobj.group(2)
+        full_title = '%s/%s' % (uploader, slug_title)
+
+        self.report_resolve(full_title)
+
+        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
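+        # The resolve endpoint turns the human-readable permalink into the track's JSON metadata, including its numeric id.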
+        info_json = self._download_webpage(resolv_url, full_title, u'Downloading info JSON')
+
+        info = json.loads(info_json)
+        video_id = info['id']
+        self.report_extraction(full_title)
+
+        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        stream_json = self._download_webpage(streams_url, full_title,
+                                             u'Downloading stream definitions',
+                                             u'unable to download stream definitions')
+
+        streams = json.loads(stream_json)
+        mediaURL = streams['http_mp3_128_url']
+        upload_date = unified_strdate(info['created_at'])
+
+        return [{
+            'id':       info['id'],
+            'url':      mediaURL,
+            'uploader': info['user']['username'],
+            'upload_date': upload_date,
+            'title':    info['title'],
+            'ext':      u'mp3',
+            'description': info['description'],
+        }]
+
+class SoundcloudSetIE(InfoExtractor):
+    """Information extractor for soundcloud.com sets
+       To access the media, the uid of the song and a stream token
+       must be extracted from the page source and the script must make
+       a request to media.soundcloud.com/crossdomain.xml. Then
+       the media can be grabbed by requesting from a URL composed
+       of the stream token and uid
+     """
+
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
+    IE_NAME = u'soundcloud:set'
+
+    def report_resolve(self, video_id):
+        """Report information extraction."""
+        self.to_screen(u'%s: Resolving id' % video_id)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # extract uploader (which is in the url)
+        uploader = mobj.group(1)
+        # extract simple title (uploader + slug of song title)
+        slug_title =  mobj.group(2)
+        full_title = '%s/sets/%s' % (uploader, slug_title)
+
+        self.report_resolve(full_title)
+
+        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
+        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+        info_json = self._download_webpage(resolv_url, full_title)
+
+        videos = []
+        info = json.loads(info_json)
+        if 'errors' in info:
+            for err in info['errors']:
+                self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
+            return
+
+        self.report_extraction(full_title)
+        for track in info['tracks']:
+            video_id = track['id']
+
+            streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
+            stream_json = self._download_webpage(streams_url, video_id, u'Downloading track info JSON')
+
+            self.report_extraction(video_id)
+            streams = json.loads(stream_json)
+            mediaURL = streams['http_mp3_128_url']
+
+            videos.append({
+                'id':       video_id,
+                'url':      mediaURL,
+                'uploader': track['user']['username'],
+                'upload_date':  unified_strdate(track['created_at']),
+                'title':    track['title'],
+                'ext':      u'mp3',
+                'description': track['description'],
+            })
+        return videos
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
new file mode 100644
index 0000000..98a65b7
--- /dev/null
+++ b/youtube_dl/extractor/spiegel.py
@@ -0,0 +1,37 @@
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+
+
+class SpiegelIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_title = self._html_search_regex(r'<div class="module-title">(.*?)</div>',
+            webpage, u'title')
+
+        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
+        xml_code = self._download_webpage(xml_url, video_id,
+                    note=u'Downloading XML', errnote=u'Failed to download XML')
+
+        idoc = xml.etree.ElementTree.fromstring(xml_code)
+        last_type = idoc[-1]
+        filename = last_type.findall('./filename')[0].text
+        duration = float(last_type.findall('./duration')[0].text)
+
+        video_url = 'http://video2.spiegel.de/flash/' + filename
+        video_ext = filename.rpartition('.')[2]
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'ext': video_ext,
+            'title': video_title,
+            'duration': duration,
+        }
+        return [info]
diff --git a/youtube_dl/extractor/stanfordoc.py b/youtube_dl/extractor/stanfordoc.py
new file mode 100644
index 0000000..8d3e32a
--- /dev/null
+++ b/youtube_dl/extractor/stanfordoc.py
@@ -0,0 +1,112 @@
+import re
+import socket
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_request,
+
+    ExtractorError,
+    orderedSet,
+    unescapeHTML,
+)
+
+
+class StanfordOpenClassroomIE(InfoExtractor):
+    """Information extractor for Stanford's Open ClassRoom"""
+
+    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
+    IE_NAME = u'stanfordoc'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        if mobj.group('course') and mobj.group('video'): # A specific video
+            course = mobj.group('course')
+            video = mobj.group('video')
+            info = {
+                'id': course + '_' + video,
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            self.report_extraction(info['id'])
+            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
+            xmlUrl = baseUrl + video + '.xml'
+            try:
+                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
+            mdoc = xml.etree.ElementTree.fromstring(metaXml)
+            try:
+                info['title'] = mdoc.findall('./title')[0].text
+                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
+            except IndexError:
+                raise ExtractorError(u'Invalid metadata XML file')
+            info['ext'] = info['url'].rpartition('.')[2]
+            return [info]
+        elif mobj.group('course'): # A course page
+            course = mobj.group('course')
+            info = {
+                'id': course,
+                'type': 'playlist',
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            coursepage = self._download_webpage(url, info['id'],
+                                        note='Downloading course info page',
+                                        errnote='Unable to download course info page')
+
+            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
+
+            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
+                coursepage, u'description', fatal=False)
+
+            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
+            info['list'] = [
+                {
+                    'type': 'reference',
+                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
+                }
+                    for vpage in links]
+            results = []
+            for entry in info['list']:
+                assert entry['type'] == 'reference'
+                results += self.extract(entry['url'])
+            return results
+        else: # Root page
+            info = {
+                'id': 'Stanford OpenClassroom',
+                'type': 'playlist',
+                'uploader': None,
+                'upload_date': None,
+            }
+
+            self.report_download_webpage(info['id'])
+            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
+            try:
+                rootpage = compat_urllib_request.urlopen(rootURL).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                raise ExtractorError(u'Unable to download course info page: ' + compat_str(err))
+
+            info['title'] = info['id']
+
+            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
+            info['list'] = [
+                {
+                    'type': 'reference',
+                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
+                }
+                    for cpage in links]
+
+            results = []
+            for entry in info['list']:
+                assert entry['type'] == 'reference'
+                results += self.extract(entry['url'])
+            return results
diff --git a/youtube_dl/extractor/statigram.py b/youtube_dl/extractor/statigram.py
new file mode 100644
index 0000000..95d2ee3
--- /dev/null
+++ b/youtube_dl/extractor/statigram.py
@@ -0,0 +1,33 @@
+import re
+
+from .common import InfoExtractor
+
+class StatigramIE(InfoExtractor):
+    _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group(1)
+        webpage = self._download_webpage(url, video_id)
+        video_url = self._html_search_regex(
+            r'<meta property="og:video:secure_url" content="(.+?)">',
+            webpage, u'video URL')
+        thumbnail_url = self._html_search_regex(
+            r'<meta property="og:image" content="(.+?)" />',
+            webpage, u'thumbnail URL', fatal=False)
+        html_title = self._html_search_regex(
+            r'<title>(.+?)</title>',
+            webpage, u'title')
+        title = html_title.rpartition(u' | Statigram')[0]
+        uploader_id = self._html_search_regex(
+            r'@([^ ]+)', title, u'uploader name', fatal=False)
+        ext = 'mp4'
+
+        return [{
+            'id':        video_id,
+            'url':       video_url,
+            'ext':       ext,
+            'title':     title,
+            'thumbnail': thumbnail_url,
+            'uploader_id' : uploader_id
+        }]
diff --git a/youtube_dl/extractor/steam.py b/youtube_dl/extractor/steam.py
new file mode 100644
index 0000000..30cb832
--- /dev/null
+++ b/youtube_dl/extractor/steam.py
@@ -0,0 +1,63 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    unescapeHTML,
+)
+
+
+class SteamIE(InfoExtractor):
+    _VALID_URL = r"""http://store\.steampowered\.com/
+                (agecheck/)?
+                (?P<urltype>video|app)/ #If the page is only for videos or for a game
+                (?P<gameID>\d+)/?
+                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
+                """
+    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
+    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url, re.VERBOSE)
+        gameID = m.group('gameID')
+
+        videourl = self._VIDEO_PAGE_TEMPLATE % gameID
+        webpage = self._download_webpage(videourl, gameID)
+
+        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
+            videourl = self._AGECHECK_TEMPLATE % gameID
+            self.report_age_confirmation()
+            webpage = self._download_webpage(videourl, gameID)
+
+        self.report_extraction(gameID)
+        game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>',
+                                             webpage, 'game title')
+
+        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
+        mweb = re.finditer(urlRE, webpage)
+        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
+        titles = re.finditer(namesRE, webpage)
+        thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
+        thumbs = re.finditer(thumbsRE, webpage)
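+        # zip() relies on the page listing movie files, titles and thumbnails in the same order, pairing each video with its title and thumbnail.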
+        videos = []
+        for vid, vtitle, thumb in zip(mweb, titles, thumbs):
+            video_id = vid.group('videoID')
+            title = vtitle.group('videoName')
+            video_url = vid.group('videoURL')
+            video_thumb = thumb.group('thumbnail')
+            if not video_url:
+                raise ExtractorError(u'Cannot find video url for %s' % video_id)
+            info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': 'flv',
+                'title': unescapeHTML(title),
+                'thumbnail': video_thumb,
+            }
+            videos.append(info)
+        return [self.playlist_result(videos, gameID, game_title)]
diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
new file mode 100644
index 0000000..092ac74
--- /dev/null
+++ b/youtube_dl/extractor/teamcoco.py
@@ -0,0 +1,46 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class TeamcocoIE(InfoExtractor):
+    _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        url_title = mobj.group('url_title')
+        webpage = self._download_webpage(url, url_title)
+
+        video_id = self._html_search_regex(r'<article class="video" data-id="(\d+?)"',
+            webpage, u'video id')
+
+        self.report_extraction(video_id)
+
+        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
+            webpage, u'title')
+
+        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)"',
+            webpage, u'thumbnail', fatal=False)
+
+        video_description = self._html_search_regex(r'<meta property="og:description" content="(.*?)"',
+            webpage, u'description', fatal=False)
+
+        data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
+        data = self._download_webpage(data_url, video_id, 'Downloading data webpage')
+
+        video_url = self._html_search_regex(r'<file type="high".*?>(.*?)</file>',
+            data, u'video URL')
+
+        return [{
+            'id':          video_id,
+            'url':         video_url,
+            'ext':         'mp4',
+            'title':       video_title,
+            'thumbnail':   thumbnail,
+            'description': video_description,
+        }]
diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py
new file mode 100644
index 0000000..ccad1c7
--- /dev/null
+++ b/youtube_dl/extractor/ted.py
@@ -0,0 +1,79 @@
+import json
+import re
+
+from .common import InfoExtractor
+
+
+class TEDIE(InfoExtractor):
+    _VALID_URL=r'''http://www\.ted\.com/
+                   (
+                        ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
+                        |
+                        ((?P<type_talk>talks)) # We have a simple talk
+                   )
+                   (/lang/(.*?))? # The url may contain the language
+                   /(?P<name>\w+) # Here goes the name and then ".html"
+                   '''
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url, re.VERBOSE)
+        if m.group('type_talk'):
+            return [self._talk_info(url)]
+        else:
+            playlist_id = m.group('playlist_id')
+            name = m.group('name')
+            self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id, name))
+            return [self._playlist_videos_info(url, name, playlist_id)]
+
+    def _playlist_videos_info(self,url,name,playlist_id=0):
+        '''Returns the videos of the playlist'''
+        video_RE=r'''
+                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
+                     ([.\s]*?)data-playlist_item_id="(\d+)"
+                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
+                     '''
+        video_name_RE=r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>'
+        webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
+        m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
+        m_names=re.finditer(video_name_RE,webpage)
+
+        playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',
+                                                 webpage, 'playlist title')
+
+        playlist_entries = []
+        for m_video, m_name in zip(m_videos,m_names):
+            talk_url='http://www.ted.com%s' % m_name.group('talk_url')
+            playlist_entries.append(self.url_result(talk_url, 'TED'))
+        return self.playlist_result(playlist_entries, playlist_id = playlist_id, playlist_title = playlist_title)
+
+    def _talk_info(self, url, video_id=0):
+        """Return the video for the talk in the url"""
+        m = re.match(self._VALID_URL, url,re.VERBOSE)
+        video_name = m.group('name')
+        webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
+        self.report_extraction(video_name)
+        # If the url includes the language we get the title translated
+        title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
+                                        webpage, 'title')
+        json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
+                                    webpage, 'json data')
+        info = json.loads(json_data)
+        desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
+                                       webpage, 'description', flags = re.DOTALL)
+
+        thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
+                                       webpage, 'thumbnail')
+        info = {
+                'id': info['id'],
+                'url': info['htmlStreams'][-1]['file'],
+                'ext': 'mp4',
+                'title': title,
+                'thumbnail': thumbnail,
+                'description': desc,
+                }
+        return info
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
new file mode 100644
index 0000000..b0851b4
--- /dev/null
+++ b/youtube_dl/extractor/tumblr.py
@@ -0,0 +1,41 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class TumblrIE(InfoExtractor):
+    _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
+
+    def _real_extract(self, url):
+        m_url = re.match(self._VALID_URL, url)
+        video_id = m_url.group('id')
+        blog = m_url.group('blog_name')
+
+        url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
+        webpage = self._download_webpage(url, video_id)
+
+        re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
+        video = re.search(re_video, webpage)
+        if video is None:
+            raise ExtractorError(u'Unable to extract video')
+        video_url = video.group('video_url')
+        ext = video.group('ext')
+
+        video_thumbnail = self._search_regex(r'posters(.*?)\[\\x22(?P<thumb>.*?)\\x22',
+            webpage, u'thumbnail', fatal=False)  # We pick the first poster
+        if video_thumbnail: video_thumbnail = video_thumbnail.replace('\\', '')
+
+        # The only place where a title can be found; it's not complete,
+        # but searching in other places doesn't work for all videos
+        video_title = self._html_search_regex(r'<title>(?P<title>.*?)</title>',
+            webpage, u'title', flags=re.DOTALL)
+
+        return [{'id': video_id,
+                 'url': video_url,
+                 'title': video_title,
+                 'thumbnail': video_thumbnail,
+                 'ext': ext
+                 }]
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
new file mode 100644
index 0000000..cfe3a68
--- /dev/null
+++ b/youtube_dl/extractor/ustream.py
@@ -0,0 +1,36 @@
+import re
+
+from .common import InfoExtractor
+
+
+class UstreamIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
+    IE_NAME = u'ustream'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('videoID')
+
+        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
+        webpage = self._download_webpage(url, video_id)
+
+        self.report_extraction(video_id)
+
+        video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
+            webpage, u'title')
+
+        uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
+            webpage, u'uploader', fatal=False, flags=re.DOTALL)
+
+        thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
+            webpage, u'thumbnail', fatal=False)
+
+        info = {
+                'id': video_id,
+                'url': video_url,
+                'ext': 'flv',
+                'title': video_title,
+                'uploader': uploader,
+                'thumbnail': thumbnail,
+               }
+        return info
diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py
new file mode 100644
index 0000000..2f3ce46
--- /dev/null
+++ b/youtube_dl/extractor/vbox7.py
@@ -0,0 +1,46 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    ExtractorError,
+)
+
+
+class Vbox7IE(InfoExtractor):
+    """Information Extractor for Vbox7"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
+
+    def _real_extract(self,url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group(1)
+
+        redirect_page, urlh = self._download_webpage_handle(url, video_id)
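+        # The play page responds with a JavaScript redirect; extract it and follow it manually to reach the actual video page.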
+        new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
+        redirect_url = urlh.geturl() + new_location
+        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
+
+        title = self._html_search_regex(r'<title>(.*)</title>',
+            webpage, u'title').split('/')[0].strip()
+
+        ext = "flv"
+        info_url = "http://vbox7.com/play/magare.do"
+        data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
+        info_request = compat_urllib_request.Request(info_url, data)
+        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
+        if info_response is None:
+            raise ExtractorError(u'Unable to extract the media url')
+        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
+
+        return [{
+            'id':        video_id,
+            'url':       final_url,
+            'ext':       ext,
+            'title':     title,
+            'thumbnail': thumbnail_url,
+        }]
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
new file mode 100644
index 0000000..677cf4e
--- /dev/null
+++ b/youtube_dl/extractor/vimeo.py
@@ -0,0 +1,138 @@
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+    compat_urllib_request,
+
+    clean_html,
+    get_element_by_attribute,
+    ExtractorError,
+    std_headers,
+)
+
+class VimeoIE(InfoExtractor):
+    """Information extractor for vimeo.com."""
+
+    # _VALID_URL matches Vimeo URLs
+    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)'
+    IE_NAME = u'vimeo'
+
+    def _verify_video_password(self, url, video_id, webpage):
+        password = self._downloader.params.get('password', None)
+        if password is None:
+            raise ExtractorError(u'This video is protected by a password, use the --password option')
+        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+        data = compat_urllib_parse.urlencode({'password': password,
+                                              'token': token})
+        # I didn't manage to use the password with https
+        if url.startswith('https'):
+            pass_url = url.replace('https','http')
+        else:
+            pass_url = url
+        password_request = compat_urllib_request.Request(pass_url+'/password', data)
+        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        password_request.add_header('Cookie', 'xsrft=%s' % token)
+        self._download_webpage(password_request, video_id,
+                               u'Verifying the password',
+                               u'Wrong password')
+
+    def _real_extract(self, url, new_video=True):
+        # Extract ID from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        video_id = mobj.group('id')
+        if not mobj.group('proto'):
+            url = 'https://' + url
+        if mobj.group('direct_link') or mobj.group('pro'):
+            url = 'https://vimeo.com/' + video_id
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url, None, std_headers)
+        webpage = self._download_webpage(request, video_id)
+
+        # Now we begin extracting as much information as we can from what we
+        # retrieved. First we extract the information common to all extractors,
+        # and latter we extract those that are Vimeo specific.
+        self.report_extraction(video_id)
+
+        # Extract the config JSON
+        try:
+            config = webpage.split(' = {config:')[1].split(',assets:')[0]
+            config = json.loads(config)
+        except Exception:
+            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
+                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
+
+            if re.search('If so please provide the correct password.', webpage):
+                self._verify_video_password(url, video_id, webpage)
+                return self._real_extract(url)
+            else:
+                raise ExtractorError(u'Unable to extract info section')
+
+        # Extract title
+        video_title = config["video"]["title"]
+
+        # Extract uploader and uploader_id
+        video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
+
+        # Extract video thumbnail
+        video_thumbnail = config["video"]["thumbnail"]
+
+        # Extract video description
+        video_description = get_element_by_attribute("itemprop", "description", webpage)
+        if video_description: video_description = clean_html(video_description)
+        else: video_description = u''
+
+        # Extract upload date
+        video_upload_date = None
+        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
+
+        # Vimeo specific: extract request signature and timestamp
+        sig = config['request']['signature']
+        timestamp = config['request']['timestamp']
+
+        # Vimeo specific: extract video codec and quality information
+        # First consider quality, then codecs, then take everything
+        # TODO bind to format param
+        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        files = { 'hd': [], 'sd': [], 'other': []}
+        for codec_name, codec_extension in codecs:
+            if codec_name in config["video"]["files"]:
+                if 'hd' in config["video"]["files"][codec_name]:
+                    files['hd'].append((codec_name, codec_extension, 'hd'))
+                elif 'sd' in config["video"]["files"][codec_name]:
+                    files['sd'].append((codec_name, codec_extension, 'sd'))
+                else:
+                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
+
+        for quality in ('hd', 'sd', 'other'):
+            if len(files[quality]) > 0:
+                video_quality = files[quality][0][2]
+                video_codec = files[quality][0][0]
+                video_extension = files[quality][0][1]
+                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+                break
+        else:
+            raise ExtractorError(u'No known codec found')
+
+        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                    % (video_id, sig, timestamp, video_quality, video_codec.upper())
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+            'thumbnail':    video_thumbnail,
+            'description':  video_description,
+        }]
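The quality/codec selection in VimeoIE above is the least obvious part of the extractor:
quality buckets are filled first (hd, then sd, then anything else), and inside a bucket the
codec preference order decides. The toy sketch below replays that order on an invented
files structure; the dictionary contents are made up and do not come from Vimeo's config JSON.

    codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
    config_files = {'vp8': {'sd': {}}, 'h264': {'hd': {}, 'sd': {}}}  # invented

    files = {'hd': [], 'sd': [], 'other': []}
    for codec_name, codec_extension in codecs:
        if codec_name in config_files:
            if 'hd' in config_files[codec_name]:
                files['hd'].append((codec_name, codec_extension, 'hd'))
            elif 'sd' in config_files[codec_name]:
                files['sd'].append((codec_name, codec_extension, 'sd'))

    for quality in ('hd', 'sd', 'other'):
        if files[quality]:
            codec, ext, chosen = files[quality][0]
            print('%s %s %s' % (codec, ext, chosen))  # h264 mp4 hd -- h264 offers an HD variant
            break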
diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py
new file mode 100644
index 0000000..b44b1cb
--- /dev/null
+++ b/youtube_dl/extractor/vine.py
@@ -0,0 +1,37 @@
+import re
+
+from .common import InfoExtractor
+
+
+class VineIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://)?(?:www\.)?vine\.co/v/(?P<id>\w+)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        webpage_url = 'https://vine.co/v/' + video_id
+        webpage = self._download_webpage(webpage_url, video_id)
+
+        self.report_extraction(video_id)
+
+        video_url = self._html_search_regex(r'<meta property="twitter:player:stream" content="(.+?)"',
+            webpage, u'video URL')
+
+        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
+            webpage, u'title')
+
+        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)(\?.*?)?"',
+            webpage, u'thumbnail', fatal=False)
+
+        uploader = self._html_search_regex(r'<div class="user">.*?<h2>(.+?)</h2>',
+            webpage, u'uploader', fatal=False, flags=re.DOTALL)
+
+        return [{
+            'id':        video_id,
+            'url':       video_url,
+            'ext':       'mp4',
+            'title':     video_title,
+            'thumbnail': thumbnail,
+            'uploader':  uploader,
+        }]
diff --git a/youtube_dl/extractor/worldstarhiphop.py b/youtube_dl/extractor/worldstarhiphop.py
new file mode 100644
index 0000000..54a77b6
--- /dev/null
+++ b/youtube_dl/extractor/worldstarhiphop.py
@@ -0,0 +1,44 @@
+import re
+
+from .common import InfoExtractor
+
+
+class WorldStarHipHopIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
+    IE_NAME = u'WorldStarHipHop'
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        webpage_src = self._download_webpage(url, video_id)
+
+        video_url = self._search_regex(r'so\.addVariable\("file","(.*?)"\)',
+            webpage_src, u'video URL')
+
+        if 'mp4' in video_url:
+            ext = 'mp4'
+        else:
+            ext = 'flv'
+
+        video_title = self._html_search_regex(r"<title>(.*)</title>",
+            webpage_src, u'title')
+
+        # Get the thumbnail; if there is none, this is a WSHH candy page, so pull the correct title from it instead.
+        thumbnail = self._html_search_regex(r'rel="image_src" href="(.*)" />',
+            webpage_src, u'thumbnail', fatal=False)
+
+        if not thumbnail:
+            _title = r"""candytitles.*>(.*)</span>"""
+            mobj = re.search(_title, webpage_src)
+            if mobj is not None:
+                video_title = mobj.group(1)
+
+        results = [{
+                    'id': video_id,
+                    'url': video_url,
+                    'title': video_title,
+                    'thumbnail': thumbnail,
+                    'ext': ext,
+                    }]
+        return results
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
new file mode 100644
index 0000000..0013509
--- /dev/null
+++ b/youtube_dl/extractor/xhamster.py
@@ -0,0 +1,61 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+
+class XHamsterIE(InfoExtractor):
+    """Information Extractor for xHamster"""
+    _VALID_URL = r'(?:http://)?(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id
+        webpage = self._download_webpage(mrss_url, video_id)
+
+        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
+        if mobj is None:
+            raise ExtractorError(u'Unable to extract media URL')
+        if len(mobj.group('server')) == 0:
+            video_url = compat_urllib_parse.unquote(mobj.group('file'))
+        else:
+            video_url = mobj.group('server')+'/key='+mobj.group('file')
+        video_extension = video_url.split('.')[-1]
+
+        video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
+            webpage, u'title')
+
+        # Can't see the description anywhere in the UI
+        # video_description = self._html_search_regex(r'<span>Description: </span>(?P<description>[^<]+)',
+        #     webpage, u'description', fatal=False)
+        # if video_description: video_description = unescapeHTML(video_description)
+
+        mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage)
+        if mobj:
+            video_upload_date = mobj.group('upload_date_Y')+mobj.group('upload_date_m')+mobj.group('upload_date_d')
+        else:
+            video_upload_date = None
+            self._downloader.report_warning(u'Unable to extract upload date')
+
+        video_uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
+            webpage, u'uploader id', default=u'anonymous')
+
+        video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
+            webpage, u'thumbnail', fatal=False)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'ext':      video_extension,
+            'title':    video_title,
+            # 'description': video_description,
+            'upload_date': video_upload_date,
+            'uploader_id': video_uploader_id,
+            'thumbnail': video_thumbnail
+        }]
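The media URL in XHamsterIE is assembled in one of two ways: an empty 'srv' value means the
'file' value is already a percent-encoded absolute URL, otherwise 'file' is a key appended
to the server. A small sketch of that branch, using the Python 3 spelling of the unquote
helper and invented placeholder values:

    from urllib.parse import unquote  # stands in for compat_urllib_parse.unquote

    def build_media_url(server, file_value):
        if len(server) == 0:
            return unquote(file_value)
        return server + '/key=' + file_value

    print(build_media_url('', 'http%3A//cdn.example/clip.mp4'))  # http://cdn.example/clip.mp4
    print(build_media_url('http://srv.example', 'abc123.mp4'))   # http://srv.example/key=abc123.mp4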
diff --git a/youtube_dl/extractor/xnxx.py b/youtube_dl/extractor/xnxx.py
new file mode 100644
index 0000000..68acb84
--- /dev/null
+++ b/youtube_dl/extractor/xnxx.py
@@ -0,0 +1,45 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+
+class XNXXIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
+    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
+    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group(1)
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._search_regex(self.VIDEO_URL_RE,
+            webpage, u'video URL')
+        video_url = compat_urllib_parse.unquote(video_url)
+
+        video_title = self._html_search_regex(self.VIDEO_TITLE_RE,
+            webpage, u'title')
+
+        video_thumbnail = self._search_regex(self.VIDEO_THUMB_RE,
+            webpage, u'thumbnail', fatal=False)
+
+        return [{
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': 'flv',
+            'thumbnail': video_thumbnail,
+            'description': None,
+        }]
diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py
new file mode 100644
index 0000000..8b3cb39
--- /dev/null
+++ b/youtube_dl/extractor/xvideos.py
@@ -0,0 +1,43 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+)
+
+
+class XVideosIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group(1)
+
+        webpage = self._download_webpage(url, video_id)
+
+        self.report_extraction(video_id)
+
+        # Extract video URL
+        video_url = compat_urllib_parse.unquote(self._search_regex(r'flv_url=(.+?)&',
+            webpage, u'video URL'))
+
+        # Extract title
+        video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XVID',
+            webpage, u'title')
+
+        # Extract video thumbnail
+        video_thumbnail = self._search_regex(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)',
+            webpage, u'thumbnail', fatal=False)
+
+        info = {
+            'id': video_id,
+            'url': video_url,
+            'uploader': None,
+            'upload_date': None,
+            'title': video_title,
+            'ext': 'flv',
+            'thumbnail': video_thumbnail,
+            'description': None,
+        }
+
+        return [info]
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
new file mode 100644
index 0000000..4b3aec9
--- /dev/null
+++ b/youtube_dl/extractor/yahoo.py
@@ -0,0 +1,113 @@
+import datetime
+import itertools
+import json
+import re
+
+from .common import InfoExtractor, SearchInfoExtractor
+from ..utils import (
+    compat_urllib_parse,
+
+    ExtractorError,
+)
+
+class YahooIE(InfoExtractor):
+    """Information extractor for screen.yahoo.com."""
+    _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
+
+        if m_id is None: 
+            # TODO: Check which url parameters are required
+            info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+            webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
+            info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+                        <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+                        <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
+                        <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
+                        '''
+            self.report_extraction(video_id)
+            m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
+            if m_info is None:
+                raise ExtractorError(u'Unable to extract video info')
+            video_title = m_info.group('title')
+            video_description = m_info.group('description')
+            video_thumb = m_info.group('thumb')
+            video_date = m_info.group('date')
+            video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
+    
+            # TODO: Find a way to get mp4 videos
+            rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+            webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
+            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
+            if m_rest is None:
+                raise ExtractorError(u'Unable to extract video url')
+            video_url = m_rest.group('url')
+            video_path = m_rest.group('path')
+
+        else: # We have to use a different method if another id is defined
+            long_id = m_id.group('new_id')
+            info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
+            webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
+            json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
+            info = json.loads(json_str)
+            res = info[u'query'][u'results'][u'mediaObj'][0]
+            stream = res[u'streams'][0]
+            video_path = stream[u'path']
+            video_url = stream[u'host']
+            meta = res[u'meta']
+            video_title = meta[u'title']
+            video_description = meta[u'description']
+            video_thumb = meta[u'thumbnail']
+            video_date = None # I can't find it
+
+        info_dict = {
+                     'id': video_id,
+                     'url': video_url,
+                     'play_path': video_path,
+                     'title': video_title,
+                     'description': video_description,
+                     'thumbnail': video_thumb,
+                     'upload_date': video_date,
+                     'ext': 'flv',
+                     }
+        return info_dict
+
+class YahooSearchIE(SearchInfoExtractor):
+    """Information Extractor for Yahoo! Video search queries."""
+
+    _MAX_RESULTS = 1000
+    IE_NAME = u'screen.yahoo:search'
+    _SEARCH_KEY = 'yvsearch'
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+
+        res = {
+            '_type': 'playlist',
+            'id': query,
+            'entries': []
+        }
+        for pagenum in itertools.count(0): 
+            result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            webpage = self._download_webpage(result_url, query,
+                                             note='Downloading results page '+str(pagenum+1))
+            info = json.loads(webpage)
+            m = info[u'm']
+            results = info[u'results']
+
+            for (i, r) in enumerate(results):
+                if (pagenum * 30) +i >= n:
+                    break
+                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
+                e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
+                res['entries'].append(e)
+            if (pagenum * 30 +i >= n) or (m[u'last'] >= (m[u'total'] -1 )):
+                break
+
+        return res
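YahooSearchIE pages through the search results 30 at a time: the b parameter sent in
result_url is pagenum * 30, and the loop stops once n entries have been collected or the
API reports the last page. A stand-alone sketch of that paging arithmetic (the numbers are
invented):

    import itertools

    n = 70          # results requested
    per_page = 30   # page size used in result_url above

    collected = 0
    for pagenum in itertools.count(0):
        offset = pagenum * per_page                  # the 'b' URL parameter
        take = min(per_page, n - collected)
        collected += take
        print('page %d: offset %d, taking %d' % (pagenum + 1, offset, take))
        if collected >= n:
            break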
diff --git a/youtube_dl/extractor/youjizz.py b/youtube_dl/extractor/youjizz.py
new file mode 100644
index 0000000..d9efac7
--- /dev/null
+++ b/youtube_dl/extractor/youjizz.py
@@ -0,0 +1,45 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class YouJizzIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('videoid')
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+
+        # Get the video title
+        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
+            webpage, u'title').strip()
+
+        # Get the embed page
+        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
+        if result is None:
+            raise ExtractorError(u'ERROR: unable to extract embed page')
+
+        embed_page_url = result.group(0).strip()
+        video_id = result.group('videoid')
+
+        webpage = self._download_webpage(embed_page_url, video_id)
+
+        # Get the video URL
+        video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
+            webpage, u'video URL')
+
+        info = {'id': video_id,
+                'url': video_url,
+                'title': video_title,
+                'ext': 'flv',
+                'format': 'flv',
+                'player_url': embed_page_url}
+
+        return [info]
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
new file mode 100644
index 0000000..ed43f42
--- /dev/null
+++ b/youtube_dl/extractor/youku.py
@@ -0,0 +1,104 @@
+import json
+import math
+import random
+import re
+import time
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+)
+
+
+class YoukuIE(InfoExtractor):
+    _VALID_URL =  r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+
+    def _gen_sid(self):
+        nowTime = int(time.time() * 1000)
+        random1 = random.randint(1000,1998)
+        random2 = random.randint(1000,9999)
+
+        return "%d%d%d" %(nowTime,random1,random2)
+
+    def _get_file_ID_mix_string(self, seed):
+        mixed = []
+        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+        seed = float(seed)
+        for i in range(len(source)):
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
+            mixed.append(source[int(index)])
+            source.remove(source[int(index)])
+        #return ''.join(mixed)
+        return mixed
+
+    def _get_file_id(self, fileId, seed):
+        mixed = self._get_file_ID_mix_string(seed)
+        ids = fileId.split('*')
+        realId = []
+        for ch in ids:
+            if ch:
+                realId.append(mixed[int(ch)])
+        return ''.join(realId)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('ID')
+
+        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+        jsondata = self._download_webpage(info_url, video_id)
+
+        self.report_extraction(video_id)
+        try:
+            config = json.loads(jsondata)
+
+            video_title =  config['data'][0]['title']
+            seed = config['data'][0]['seed']
+
+            format = self._downloader.params.get('format', None)
+            supported_format = list(config['data'][0]['streamfileids'].keys())
+
+            if format is None or format == 'best':
+                if 'hd2' in supported_format:
+                    format = 'hd2'
+                else:
+                    format = 'flv'
+                ext = u'flv'
+            elif format == 'worst':
+                format = 'mp4'
+                ext = u'mp4'
+            else:
+                format = 'flv'
+                ext = u'flv'
+
+
+            fileid = config['data'][0]['streamfileids'][format]
+            keys = [s['k'] for s in config['data'][0]['segs'][format]]
+        except (UnicodeDecodeError, ValueError, KeyError):
+            raise ExtractorError(u'Unable to extract info section')
+
+        files_info = []
+        sid = self._gen_sid()
+        fileid = self._get_file_id(fileid, seed)
+
+        # The two characters at fileid[8:10] encode the segment number,
+        # so they are replaced with '%02X' % index for every segment below.
+        for index, key in enumerate(keys):
+
+            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+            info = {
+                'id': '%s_part%02d' % (video_id, index),
+                'url': download_url,
+                'uploader': None,
+                'upload_date': None,
+                'title': video_title,
+                'ext': ext,
+            }
+            files_info.append(info)
+
+        return files_info
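Multi-part Youku videos reuse a single decoded fileid: for every segment, the two
characters at positions 8-9 are overwritten with the zero-padded hexadecimal segment
index, as the comment above notes. A tiny sketch with an invented fileid:

    fileid = 'ABCDEFGH00ZZZZZZZZZZ'  # invented 20-character fileid

    for index in range(3):  # pretend the video has three segments
        temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
        print(temp_fileid)
    # ABCDEFGH00ZZZZZZZZZZ
    # ABCDEFGH01ZZZZZZZZZZ
    # ABCDEFGH02ZZZZZZZZZZ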
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
new file mode 100644
index 0000000..5d8b7db
--- /dev/null
+++ b/youtube_dl/extractor/youporn.py
@@ -0,0 +1,117 @@
+import json
+import os
+import re
+import sys
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+
+    ExtractorError,
+    unescapeHTML,
+    unified_strdate,
+)
+
+
+class YouPornIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
+
+    def _print_formats(self, formats):
+        """Print all available formats"""
+        print(u'Available formats:')
+        print(u'ext\t\tformat')
+        print(u'---------------------------------')
+        for format in formats:
+            print(u'%s\t\t%s'  % (format['ext'], format['format']))
+
+    def _specific(self, req_format, formats):
+        for x in formats:
+            if x["format"] == req_format:
+                return x
+        return None
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('videoid')
+
+        req = compat_urllib_request.Request(url)
+        req.add_header('Cookie', 'age_verified=1')
+        webpage = self._download_webpage(req, video_id)
+
+        # Get JSON parameters
+        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, u'JSON parameters')
+        try:
+            params = json.loads(json_params)
+        except ValueError:
+            raise ExtractorError(u'Invalid JSON')
+
+        self.report_extraction(video_id)
+        try:
+            video_title = params['title']
+            upload_date = unified_strdate(params['release_date_f'])
+            video_description = params['description']
+            video_uploader = params['submitted_by']
+            thumbnail = params['thumbnails'][0]['image']
+        except KeyError:
+            raise ExtractorError(u'Missing JSON parameter: %s' % sys.exc_info()[1])
+
+        # Get all of the formats available
+        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
+        download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
+            webpage, u'download list').strip()
+
+        # Get all of the links from the page
+        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        links = re.findall(LINK_RE, download_list_html)
+        if len(links) == 0:
+            raise ExtractorError(u'ERROR: no known formats available for video')
+
+        self.to_screen(u'Links found: %d' % len(links))
+
+        formats = []
+        for link in links:
+
+            # A link looks like this:
+            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
+            # A path looks like this:
+            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
+            video_url = unescapeHTML(link)
+            path = compat_urllib_parse_urlparse(video_url).path
+            extension = os.path.splitext(path)[1][1:]
+            format = path.split('/')[4].split('_')[:2]
+            # size = format[0]
+            # bitrate = format[1]
+            format = "-".join(format)
+            # title = u'%s-%s-%s' % (video_title, size, bitrate)
+
+            formats.append({
+                'id': video_id,
+                'url': video_url,
+                'uploader': video_uploader,
+                'upload_date': upload_date,
+                'title': video_title,
+                'ext': extension,
+                'format': format,
+                'thumbnail': thumbnail,
+                'description': video_description
+            })
+
+        if self._downloader.params.get('listformats', None):
+            self._print_formats(formats)
+            return
+
+        req_format = self._downloader.params.get('format', None)
+        self.to_screen(u'Format: %s' % req_format)
+
+        if req_format is None or req_format == 'best':
+            return [formats[0]]
+        elif req_format == 'worst':
+            return [formats[-1]]
+        elif req_format in ('-1', 'all'):
+            return formats
+        else:
+            format = self._specific(req_format, formats)
+            if format is None:
+                raise ExtractorError(u'Requested format not available')
+            return [format]
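The format label in YouPornIE is derived purely from the download path, as the inline
comments above show. Running the same slicing on the example path quoted in those comments
(the Python 3 spelling of the urlparse helper is used here for brevity):

    from urllib.parse import urlparse  # stands in for compat_urllib_parse_urlparse
    import os

    link = ('http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/'
            'YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4')
    path = urlparse(link).path
    print(os.path.splitext(path)[1][1:])                # mp4
    print('-'.join(path.split('/')[4].split('_')[:2]))  # 480p-370k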
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
new file mode 100644
index 0000000..fbe8d63
--- /dev/null
+++ b/youtube_dl/extractor/youtube.py
@@ -0,0 +1,795 @@
+# coding: utf-8
+
+import json
+import netrc
+import re
+import socket
+
+from .common import InfoExtractor, SearchInfoExtractor
+from ..utils import (
+    compat_http_client,
+    compat_parse_qs,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_request,
+    compat_str,
+
+    clean_html,
+    get_element_by_id,
+    ExtractorError,
+    unescapeHTML,
+    unified_strdate,
+)
+
+
+class YoutubeIE(InfoExtractor):
+    """Information extractor for youtube.com."""
+
+    _VALID_URL = r"""^
+                     (
+                         (?:https?://)?                                       # http(s):// (optional)
+                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
+                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
+                         (?:                                                  # the various things that can precede the ID:
+                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
+                             |(?:                                             # or the v= param in all its forms
+                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
+                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
+                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
+                                 v=
+                             )
+                         )?                                                   # optional -> youtube.com/xxxx is OK
+                     )?                                                       # all until now is optional -> you can pass the naked ID
+                     ([0-9A-Za-z_-]+)                                         # here is it! the YouTube video ID
+                     (?(1).+)?                                                # if we found the ID, everything can follow
+                     $"""
+    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
+    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
+    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
+    _NETRC_MACHINE = 'youtube'
+    # Listed in order of quality
+    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
+    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
+    _video_extensions = {
+        '13': '3gp',
+        '17': 'mp4',
+        '18': 'mp4',
+        '22': 'mp4',
+        '37': 'mp4',
+        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
+        '43': 'webm',
+        '44': 'webm',
+        '45': 'webm',
+        '46': 'webm',
+    }
+    _video_dimensions = {
+        '5': '240x400',
+        '6': '???',
+        '13': '???',
+        '17': '144x176',
+        '18': '360x640',
+        '22': '720x1280',
+        '34': '360x640',
+        '35': '480x854',
+        '37': '1080x1920',
+        '38': '3072x4096',
+        '43': '360x640',
+        '44': '480x854',
+        '45': '720x1280',
+        '46': '1080x1920',
+    }
+    IE_NAME = u'youtube'
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        if YoutubePlaylistIE.suitable(url): return False
+        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+    def report_lang(self):
+        """Report attempt to set language."""
+        self.to_screen(u'Setting language')
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self.to_screen(u'Logging in')
+
+    def report_video_webpage_download(self, video_id):
+        """Report attempt to download video webpage."""
+        self.to_screen(u'%s: Downloading video webpage' % video_id)
+
+    def report_video_info_webpage_download(self, video_id):
+        """Report attempt to download video info webpage."""
+        self.to_screen(u'%s: Downloading video info webpage' % video_id)
+
+    def report_video_subtitles_download(self, video_id):
+        """Report attempt to download video info webpage."""
+        self.to_screen(u'%s: Checking available subtitles' % video_id)
+
+    def report_video_subtitles_request(self, video_id, sub_lang, format):
+        """Report attempt to download video info webpage."""
+        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+
+    def report_video_subtitles_available(self, video_id, sub_lang_list):
+        """Report available subtitles."""
+        sub_lang = ",".join(list(sub_lang_list.keys()))
+        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
+
+    def report_information_extraction(self, video_id):
+        """Report attempt to extract video information."""
+        self.to_screen(u'%s: Extracting video information' % video_id)
+
+    def report_unavailable_format(self, video_id, format):
+        """Report extracted video URL."""
+        self.to_screen(u'%s: Format %s not available' % (video_id, format))
+
+    def report_rtmp_download(self):
+        """Indicate the download will use the RTMP protocol."""
+        self.to_screen(u'RTMP download detected')
+
+    @staticmethod
+    def _decrypt_signature(s):
+        """Decrypt the key the two subkeys must have a length of 43"""
+        (a,b) = s.split('.')
+        if len(a) != 43 or len(b) != 43:
+            raise ExtractorError(u'Unable to decrypt signature, subkeys lengths not valid')
+        b = ''.join([b[:8],a[0],b[9:18],b[-4],b[19:39], b[18]])[0:40]
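+        # b has just been rebuilt from b[:8], a[0], b[9:18], b[-4], b[19:39] and
+        # b[18], truncated to 40 characters; next, a keeps only its last 40
+        # characters and the signature is returned as "a.b" reversed.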
+        a = a[-40:]
+        s_dec = '.'.join((a,b))[::-1]
+        return s_dec
+
+    def _get_available_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'unable to download video subtitles: %s' % compat_str(err), None)
+        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
+        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
+        if not sub_lang_list:
+            return (u'video doesn\'t have subtitles', None)
+        return sub_lang_list
+
+    def _list_available_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        self.report_video_subtitles_available(video_id, sub_lang_list)
+
+    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+        """
+        Return tuple:
+        (error_message, sub_lang, sub)
+        """
+        self.report_video_subtitles_request(video_id, sub_lang, format)
+        params = compat_urllib_parse.urlencode({
+            'lang': sub_lang,
+            'name': sub_name,
+            'v': video_id,
+            'fmt': format,
+        })
+        url = 'http://www.youtube.com/api/timedtext?' + params
+        try:
+            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
+        if not sub:
+            return (u'Did not fetch video subtitles', None, None)
+        return (None, sub_lang, sub)
+
+    def _request_automatic_caption(self, video_id, webpage):
+        """We need the webpage for getting the captions url, pass it as an
+           argument to speed up the process."""
+        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
+        sub_format = self._downloader.params.get('subtitlesformat')
+        self.to_screen(u'%s: Looking for automatic captions' % video_id)
+        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
+        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
+        if mobj is None:
+            return [(err_msg, None, None)]
+        player_config = json.loads(mobj.group(1))
+        try:
+            args = player_config[u'args']
+            caption_url = args[u'ttsurl']
+            timestamp = args[u'timestamp']
+            params = compat_urllib_parse.urlencode({
+                'lang': 'en',
+                'tlang': sub_lang,
+                'fmt': sub_format,
+                'ts': timestamp,
+                'kind': 'asr',
+            })
+            subtitles_url = caption_url + '&' + params
+            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
+            return [(None, sub_lang, sub)]
+        except KeyError:
+            return [(err_msg, None, None)]
+
+    def _extract_subtitle(self, video_id):
+        """
+        Return a list with a tuple:
+        [(error_message, sub_lang, sub)]
+        """
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        if isinstance(sub_lang_list, tuple):  # There was an error; the available subtitles could not be retrieved
+            return [(sub_lang_list[0], None, None)]
+        if self._downloader.params.get('subtitleslang', False):
+            sub_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in sub_lang_list:
+            sub_lang = 'en'
+        else:
+            sub_lang = list(sub_lang_list.keys())[0]
+        if sub_lang not in sub_lang_list:
+            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]
+
+        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+        return [subtitle]
+
+    def _extract_all_subtitles(self, video_id):
+        sub_lang_list = self._get_available_subtitles(video_id)
+        sub_format = self._downloader.params.get('subtitlesformat')
+        if isinstance(sub_lang_list, tuple):  # There was an error; the available subtitles could not be retrieved
+            return [(sub_lang_list[0], None, None)]
+        subtitles = []
+        for sub_lang in sub_lang_list:
+            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+            subtitles.append(subtitle)
+        return subtitles
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for x in formats:
+            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        username = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            username = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    username = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
+                return
+
+        # Set language
+        request = compat_urllib_request.Request(self._LANG_URL)
+        try:
+            self.report_lang()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
+            return
+
+        # No authentication to be performed
+        if username is None:
+            return
+
+        request = compat_urllib_request.Request(self._LOGIN_URL)
+        try:
+            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
+            return
+
+        galx = None
+        dsh = None
+        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
+        if match:
+            galx = match.group(1)
+
+        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
+        if match:
+            dsh = match.group(1)
+
+        # Log in
+        login_form_strs = {
+                u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+                u'Email': username,
+                u'GALX': galx,
+                u'Passwd': password,
+                u'PersistentCookie': u'yes',
+                u'_utf8': u'霱',
+                u'bgresponse': u'js_disabled',
+                u'checkConnection': u'',
+                u'checkedDomains': u'youtube',
+                u'dnConn': u'',
+                u'dsh': dsh,
+                u'pstMsg': u'0',
+                u'rmShown': u'1',
+                u'secTok': u'',
+                u'signIn': u'Sign in',
+                u'timeStmp': u'',
+                u'service': u'youtube',
+                u'uilel': u'3',
+                u'hl': u'en_US',
+        }
+        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
+        # chokes on unicode
+        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
+        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
+                self._downloader.report_warning(u'unable to log in: bad username or password')
+                return
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
+            return
+
+        # Confirm age
+        age_form = {
+                'next_url':     '/',
+                'action_confirm':   'Confirm',
+                }
+        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
+        try:
+            self.report_age_confirmation()
+            compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
+
+    def _extract_id(self, url):
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group(2)
+        return video_id
+
+    def _real_extract(self, url):
+        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+        mobj = re.search(self._NEXT_URL_RE, url)
+        if mobj:
+            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+        video_id = self._extract_id(url)
+
+        # Get video webpage
+        self.report_video_webpage_download(video_id)
+        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        request = compat_urllib_request.Request(url)
+        try:
+            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))
+
+        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
+
+        # Attempt to extract SWF player URL
+        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
+        if mobj is not None:
+            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
+        else:
+            player_url = None
+
+        # Get video info
+        self.report_video_info_webpage_download(video_id)
+        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                    % (video_id, el_type))
+            video_info_webpage = self._download_webpage(video_info_url, video_id,
+                                    note=False,
+                                    errnote='unable to download video info webpage')
+            video_info = compat_parse_qs(video_info_webpage)
+            if 'token' in video_info:
+                break
+        if 'token' not in video_info:
+            if 'reason' in video_info:
+                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
+            else:
+                raise ExtractorError(u'"token" parameter not in video info for unknown reason')
+
+        # Check for "rental" videos
+        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
+            raise ExtractorError(u'"rental" videos not supported')
+
+        # Start extracting information
+        self.report_information_extraction(video_id)
+
+        # uploader
+        if 'author' not in video_info:
+            raise ExtractorError(u'Unable to extract uploader name')
+        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
+
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.report_warning(u'unable to extract uploader nickname')
+
+        # title
+        if 'title' not in video_info:
+            raise ExtractorError(u'Unable to extract video title')
+        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
+
+        # thumbnail image
+        if 'thumbnail_url' not in video_info:
+            self._downloader.report_warning(u'unable to extract video thumbnail')
+            video_thumbnail = ''
+        else:   # don't panic if we can't find it
+            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
+
+        # upload date
+        upload_date = None
+        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+        if mobj is not None:
+            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+            upload_date = unified_strdate(upload_date)
+
+        # description
+        video_description = get_element_by_id("eow-description", video_webpage)
+        if video_description:
+            video_description = clean_html(video_description)
+        else:
+            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
+            if fd_mobj:
+                video_description = unescapeHTML(fd_mobj.group(1))
+            else:
+                video_description = u''
+
+        # subtitles
+        video_subtitles = None
+
+        if self._downloader.params.get('writesubtitles', False):
+            video_subtitles = self._extract_subtitle(video_id)
+            if video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitles[0]
+                if sub_error:
+                    # We try with the automatic captions
+                    video_subtitles = self._request_automatic_caption(video_id, video_webpage)
+                    (sub_error_auto, sub_lang, sub) = video_subtitles[0]
+                    if sub is not None:
+                        pass
+                    else:
+                        # We report the original error
+                        self._downloader.report_warning(sub_error)
+
+        if self._downloader.params.get('allsubtitles', False):
+            video_subtitles = self._extract_all_subtitles(video_id)
+            for video_subtitle in video_subtitles:
+                (sub_error, sub_lang, sub) = video_subtitle
+                if sub_error:
+                    self._downloader.report_warning(sub_error)
+
+        if self._downloader.params.get('listsubtitles', False):
+            self._list_available_subtitles(video_id)
+            return
+
+        if 'length_seconds' not in video_info:
+            self._downloader.report_warning(u'unable to extract video duration')
+            video_duration = ''
+        else:
+            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
+
+        # Decide which formats to download
+        req_format = self._downloader.params.get('format', None)
+
+        try:
+            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
+            info = json.loads(mobj.group(1))
+            args = info['args']
+            if args.get('ptk', '') == 'vevo' or 'dashmpd' in args:
+                # Vevo videos with encrypted signatures
+                self.to_screen(u'%s: Vevo video detected.' % video_id)
+                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
+        except ValueError:
+            pass
+
+        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+            self.report_rtmp_download()
+            video_url_list = [(None, video_info['conn'][0])]
+        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+            url_map = {}
+            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
+                url_data = compat_parse_qs(url_data_str)
+                if 'itag' in url_data and 'url' in url_data:
+                    url = url_data['url'][0]
+                    if 'sig' in url_data:
+                        url += '&signature=' + url_data['sig'][0]
+                    elif 's' in url_data:
+                        signature = self._decrypt_signature(url_data['s'][0])
+                        url += '&signature=' + signature
+                    if 'ratebypass' not in url:
+                        url += '&ratebypass=yes'
+                    url_map[url_data['itag'][0]] = url
+
+            format_limit = self._downloader.params.get('format_limit', None)
+            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
+            if format_limit is not None and format_limit in available_formats:
+                format_list = available_formats[available_formats.index(format_limit):]
+            else:
+                format_list = available_formats
+            existing_formats = [x for x in format_list if x in url_map]
+            if len(existing_formats) == 0:
+                raise ExtractorError(u'no known formats available for video')
+            if self._downloader.params.get('listformats', None):
+                self._print_formats(existing_formats)
+                return
+            if req_format is None or req_format == 'best':
+                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+            elif req_format == 'worst':
+                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
+            elif req_format in ('-1', 'all'):
+                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+            else:
+                # Specific formats. We pick the first in a slash-delimited sequence.
+                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+                req_formats = req_format.split('/')
+                video_url_list = None
+                for rf in req_formats:
+                    if rf in url_map:
+                        video_url_list = [(rf, url_map[rf])]
+                        break
+                if video_url_list is None:
+                    raise ExtractorError(u'requested format not available')
+        else:
+            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
+
+        results = []
+        for format_param, video_real_url in video_url_list:
+            # Extension
+            video_extension = self._video_extensions.get(format_param, 'flv')
+
+            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
+                                              self._video_dimensions.get(format_param, '???'))
+
+            results.append({
+                'id':       video_id,
+                'url':      video_real_url,
+                'uploader': video_uploader,
+                'uploader_id': video_uploader_id,
+                'upload_date':  upload_date,
+                'title':    video_title,
+                'ext':      video_extension,
+                'format':   video_format,
+                'thumbnail':    video_thumbnail,
+                'description':  video_description,
+                'player_url':   player_url,
+                'subtitles':    video_subtitles,
+                'duration':     video_duration
+            })
+        return results
+
+class YoutubePlaylistIE(InfoExtractor):
+    """Information Extractor for YouTube playlists."""
+
+    _VALID_URL = r"""(?:
+                        (?:https?://)?
+                        (?:\w+\.)?
+                        youtube\.com/
+                        (?:
+                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
+                           \? (?:.*?&)*? (?:p|a|list)=
+                        |  p/
+                        )
+                        ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
+                        .*
+                     |
+                        ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
+                     )"""
+    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
+    _MAX_RESULTS = 50
+    IE_NAME = u'youtube:playlist'
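+    # For illustration only (the playlist ids below are invented), _VALID_URL is
+    # meant to accept URLs such as
+    #   https://www.youtube.com/playlist?list=PL0123456789abcdef
+    #   http://www.youtube.com/view_play_list?p=PL0123456789abcdef
+    # as well as a bare playlist id like PL0123456789abcdef.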
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
+
+    def _real_extract(self, url):
+        # Extract playlist id
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # Download playlist videos from API
+        playlist_id = mobj.group(1) or mobj.group(2)
+        page_num = 1
+        videos = []
+
+        while True:
+            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
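+            # Paging arithmetic: with _MAX_RESULTS = 50, page_num 1 requests
+            # start-index 1, page_num 2 requests 51, page_num 3 requests 101, ...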
+            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)
+
+            try:
+                response = json.loads(page)
+            except ValueError as err:
+                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
+
+            if 'feed' not in response:
+                raise ExtractorError(u'Got a malformed response from YouTube API')
+            playlist_title = response['feed']['title']['$t']
+            if 'entry' not in response['feed']:
+                # Number of videos is a multiple of self._MAX_RESULTS
+                break
+
+            for entry in response['feed']['entry']:
+                index = entry['yt$position']['$t']
+                if 'media$group' in entry and 'media$player' in entry['media$group']:
+                    videos.append((index, entry['media$group']['media$player']['url']))
+
+            if len(response['feed']['entry']) < self._MAX_RESULTS:
+                break
+            page_num += 1
+
+        videos = [v[1] for v in sorted(videos)]
+
+        url_results = [self.url_result(url, 'Youtube') for url in videos]
+        return [self.playlist_result(url_results, playlist_id, playlist_title)]
+
+
+class YoutubeChannelIE(InfoExtractor):
+    """Information Extractor for YouTube channels."""
+
+    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
+    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
+    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
+    _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
+    IE_NAME = u'youtube:channel'
+
+    def extract_videos_from_page(self, page):
+        ids_in_page = []
+        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
+            if mobj.group(1) not in ids_in_page:
+                ids_in_page.append(mobj.group(1))
+        return ids_in_page
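+    # Sketch of the expected behaviour (video id invented): given markup containing
+    #   <a href="/watch?v=abc123XYZ_-&amp;feature=plcp">
+    # extract_videos_from_page() returns ['abc123XYZ_-'], keeping only the first
+    # occurrence of each id.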
+
+    def _real_extract(self, url):
+        # Extract channel id
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        # Download channel page
+        channel_id = mobj.group(1)
+        video_ids = []
+        pagenum = 1
+
+        url = self._TEMPLATE_URL % (channel_id, pagenum)
+        page = self._download_webpage(url, channel_id,
+                                      u'Downloading page #%s' % pagenum)
+
+        # Extract video identifiers
+        ids_in_page = self.extract_videos_from_page(page)
+        video_ids.extend(ids_in_page)
+
+        # Download any subsequent channel pages using the json-based channel_ajax query
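+        # The channel_ajax endpoint answers with JSON: 'content_html' carries the
+        # markup with the next batch of video links, while 'load_more_widget_html'
+        # is checked for _MORE_PAGES_INDICATOR to decide whether more pages follow.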
+        if self._MORE_PAGES_INDICATOR in page:
+            while True:
+                pagenum = pagenum + 1
+
+                url = self._MORE_PAGES_URL % (pagenum, channel_id)
+                page = self._download_webpage(url, channel_id,
+                                              u'Downloading page #%s' % pagenum)
+
+                page = json.loads(page)
+
+                ids_in_page = self.extract_videos_from_page(page['content_html'])
+                video_ids.extend(ids_in_page)
+
+                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
+                    break
+
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
+        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+        url_entries = [self.url_result(url, 'Youtube') for url in urls]
+        return [self.playlist_result(url_entries, channel_id)]
+
+
+class YoutubeUserIE(InfoExtractor):
+    """Information Extractor for YouTube users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
+    _GDATA_PAGE_SIZE = 50
+    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
+    IE_NAME = u'youtube:user'
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+
+        username = mobj.group(1)
+
+        # Download video ids using YouTube Data API. Result size per
+        # query is limited (currently to 50 videos) so we need to query
+        # page by page until there are no video ids - it means we got
+        # all of them.
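+        # With _GDATA_PAGE_SIZE = 50 this means start-index 1 for pagenum 0,
+        # 51 for pagenum 1, 101 for pagenum 2, and so on.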
+
+        video_ids = []
+        pagenum = 0
+
+        while True:
+            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+
+            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
+            page = self._download_webpage(gdata_url, username,
+                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE - 1))
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+
+            video_ids.extend(ids_in_page)
+
+            # A small optimization: if the current page is not "full",
+            # i.e. it contains fewer than PAGE_SIZE video ids, we can
+            # assume it is the last one, so there is no need to query
+            # further pages.
+
+            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+        url_results = [self.url_result(url, 'Youtube') for url in urls]
+        return [self.playlist_result(url_results, playlist_title=username)]
+
+class YoutubeSearchIE(SearchInfoExtractor):
+    """Information Extractor for YouTube search queries."""
+    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+    _MAX_RESULTS = 1000
+    IE_NAME = u'youtube:search'
+    _SEARCH_KEY = 'ytsearch'
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _get_n_results(self, query, n):
+        """Get a specified number of results for a query"""
+
+        video_ids = []
+        pagenum = 0
+        limit = n
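+        # Worked example: for n = 120 with more than 120 total matches, the loop
+        # below downloads pages at start-index 1, 51 and 101, and the result is
+        # then trimmed down to the first 120 ids.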
+
+        while (50 * pagenum) < limit:
+            self.report_download_page(query, pagenum+1)
+            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
+            api_response = json.loads(data)['data']
+
+            if 'items' not in api_response:
+                raise ExtractorError(u'[youtube] No video results')
+
+            new_ids = [video['id'] for video in api_response['items']]
+            video_ids += new_ids
+
+            limit = min(n, api_response['totalItems'])
+            pagenum += 1
+
+        if len(video_ids) > n:
+            video_ids = video_ids[:n]
+        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % video_id, 'Youtube') for video_id in video_ids]
+        return self.playlist_result(videos, query)
diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py
new file mode 100644
index 0000000..418509c
--- /dev/null
+++ b/youtube_dl/extractor/zdf.py
@@ -0,0 +1,65 @@
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    unescapeHTML,
+)
+
+class ZDFIE(InfoExtractor):
+    _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+    _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
+    _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
+    _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
+    _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            raise ExtractorError(u'Invalid URL: %s' % url)
+        video_id = mobj.group('video_id')
+
+        html = self._download_webpage(url, video_id)
+        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+        if not streams:
+            raise ExtractorError(u'No media url found.')
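+        # Illustrative shape of a streams entry (values invented); the keys come
+        # from the named groups of _MEDIA_STREAM:
+        #   {'video_url': 'http://www.zdf.de/.../wstreaming/zdf/veryhigh/clip.asx',
+        #    'media_type': 'wstreaming', 'quality': 'veryhigh'}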
+
+        # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and an mms URL
+        # s['media_type'] == 'hstreaming' -> use 'QuickTime' and an rtsp URL
+        # Choose the first/default media type and the highest quality for now.
+        stream_ = None
+        for s in streams:  # find 300 - dsl1000mbit
+            if s['quality'] == '300' and s['media_type'] == 'wstreaming':
+                stream_ = s
+                break
+        for s in streams:  # find veryhigh - dsl2000mbit
+            if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming':  # 'hstreaming' - rtsp is not working
+                stream_ = s
+                break
+        if stream_ is None:
+            raise ExtractorError(u'No stream found.')
+
+        media_link = self._download_webpage(stream_['video_url'], video_id, 'Get stream URL')
+
+        self.report_extraction(video_id)
+        mobj = re.search(self._TITLE, html)
+        if mobj is None:
+            raise ExtractorError(u'Cannot extract title')
+        title = unescapeHTML(mobj.group('title'))
+
+        mobj = re.search(self._MMS_STREAM, media_link)
+        if mobj is None:
+            mobj = re.search(self._RTSP_STREAM, media_link)
+            if mobj is None:
+                raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
+        mms_url = mobj.group('video_url')
+
+        mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
+        if mobj is None:
+            raise ExtractorError(u'Cannot extract extension')
+        ext = mobj.group('ext')
+
+        return [{'id': video_id,
+                 'url': mms_url,
+                 'title': title,
+                 'ext': ext
+                 }]
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 7c6757e..c037c74 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
 
-__version__ = '2013.06.21'
+__version__ = '2013.06.26'
-- 
2.41.3