summaryrefslogtreecommitdiff
path: root/youtube_dl/extractor/crunchyroll.py
blob: b92f2544799769abc9690a04d4d1090af435ca79 (plain)
    1 # coding: utf-8
    2 from __future__ import unicode_literals
    3 
    4 import re
    5 import json
    6 import base64
    7 import zlib
    8 
    9 from hashlib import sha1
   10 from math import pow, sqrt, floor
   11 from .common import InfoExtractor
   12 from ..compat import (
   13     compat_etree_fromstring,
   14     compat_urllib_parse_urlencode,
   15     compat_urllib_request,
   16     compat_urlparse,
   17 )
   18 from ..utils import (
   19     ExtractorError,
   20     bytes_to_intlist,
   21     intlist_to_bytes,
   22     int_or_none,
   23     lowercase_escape,
   24     remove_end,
   25     sanitized_Request,
   26     unified_strdate,
   27     urlencode_postdata,
   28     xpath_text,
   29     extract_attributes,
   30 )
   31 from ..aes import (
   32     aes_cbc_decrypt,
   33 )
   34 
   35 
   36 class CrunchyrollBaseIE(InfoExtractor):
   37     _LOGIN_URL = 'https://www.crunchyroll.com/login'
   38     _LOGIN_FORM = 'login_form'
   39     _NETRC_MACHINE = 'crunchyroll'
   40 
   41     def _call_rpc_api(self, method, video_id, note=None, data=None):
   42         data = data or {}
   43         data['req'] = 'RpcApi' + method
   44         data = compat_urllib_parse_urlencode(data).encode('utf-8')
   45         return self._download_xml(
   46             'http://www.crunchyroll.com/xml/',
   47             video_id, note, fatal=False, data=data, headers={
   48                 'Content-Type': 'application/x-www-form-urlencoded',
   49             })
   50 
   51     def _login(self):
   52         (username, password) = self._get_login_info()
   53         if username is None:
   54             return
   55 
   56         self._download_webpage(
   57             'https://www.crunchyroll.com/?a=formhandler',
   58             None, 'Logging in', 'Wrong login info',
   59             data=urlencode_postdata({
   60                 'formname': 'RpcApiUser_Login',
   61                 'next_url': 'https://www.crunchyroll.com/acct/membership',
   62                 'name': username,
   63                 'password': password,
   64             }))
   65 
   66         '''
   67         login_page = self._download_webpage(
   68             self._LOGIN_URL, None, 'Downloading login page')
   69 
   70         def is_logged(webpage):
   71             return '<title>Redirecting' in webpage
   72 
   73         # Already logged in
   74         if is_logged(login_page):
   75             return
   76 
   77         login_form_str = self._search_regex(
   78             r'(?P<form><form[^>]+?id=(["\'])%s\2[^>]*>)' % self._LOGIN_FORM,
   79             login_page, 'login form', group='form')
   80 
   81         post_url = extract_attributes(login_form_str).get('action')
   82         if not post_url:
   83             post_url = self._LOGIN_URL
   84         elif not post_url.startswith('http'):
   85             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
   86 
   87         login_form = self._form_hidden_inputs(self._LOGIN_FORM, login_page)
   88 
   89         login_form.update({
   90             'login_form[name]': username,
   91             'login_form[password]': password,
   92         })
   93 
   94         response = self._download_webpage(
   95             post_url, None, 'Logging in', 'Wrong login info',
   96             data=urlencode_postdata(login_form),
   97             headers={'Content-Type': 'application/x-www-form-urlencoded'})
   98 
   99         # Successful login
  100         if is_logged(response):
  101             return
  102 
  103         error = self._html_search_regex(
  104             '(?s)<ul[^>]+class=["\']messages["\'][^>]*>(.+?)</ul>',
  105             response, 'error message', default=None)
  106         if error:
  107             raise ExtractorError('Unable to login: %s' % error, expected=True)
  108 
  109         raise ExtractorError('Unable to log in')
  110         '''
  111 
  112     def _real_initialize(self):
  113         self._login()
  114 
  115     def _download_webpage(self, url_or_request, *args, **kwargs):
  116         request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
  117                    else sanitized_Request(url_or_request))
  118         # Accept-Language must be set explicitly to accept any language to avoid issues
  119         # similar to https://github.com/rg3/youtube-dl/issues/6797.
  120         # Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction
  121         # should be imposed or not (from what I can see it just takes the first language
  122         # ignoring the priority and requires it to correspond the IP). By the way this causes
  123         # Crunchyroll to not work in georestriction cases in some browsers that don't place
  124         # the locale lang first in header. However allowing any language seems to workaround the issue.
  125         request.add_header('Accept-Language', '*')
  126         return super(CrunchyrollBaseIE, self)._download_webpage(request, *args, **kwargs)
  127 
  128     @staticmethod
  129     def _add_skip_wall(url):
  130         parsed_url = compat_urlparse.urlparse(url)
  131         qs = compat_urlparse.parse_qs(parsed_url.query)
  132         # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message:
  133         # > This content may be inappropriate for some people.
  134         # > Are you sure you want to continue?
  135         # since it's not disabled by default in crunchyroll account's settings.
  136         # See https://github.com/rg3/youtube-dl/issues/7202.
  137         qs['skip_wall'] = ['1']
  138         return compat_urlparse.urlunparse(
  139             parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
  140 
  141 
class CrunchyrollIE(CrunchyrollBaseIE):
    # Matches both www/m subdomains, the .com/.fr TLDs and the several URL
    # layouts seen in the tests below (media-<id>, media/?id=<id> and
    # /<show-slug>/<episode-slug>-<id>).
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        'info_dict': {
            'id': '645513',
            'ext': 'mp4',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&amp)',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
        'info_dict': {
            'id': '589804',
            'ext': 'flv',
            'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
            'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Danny Choo Network',
            'upload_date': '20120213',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
        'skip': 'Video gone',
    }, {
        'url': 'http://www.crunchyroll.com/rezero-starting-life-in-another-world-/episode-5-the-morning-of-our-promise-is-still-distant-702409',
        'info_dict': {
            'id': '702409',
            'ext': 'mp4',
            'title': 'Re:ZERO -Starting Life in Another World- Episode 5 – The Morning of Our Promise Is Still Distant',
            'description': 'md5:97664de1ab24bbf77a9c01918cb7dca9',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'TV TOKYO',
            'upload_date': '20160508',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/konosuba-gods-blessing-on-this-wonderful-world/episode-1-give-me-deliverance-from-this-judicial-injustice-727589',
        'info_dict': {
            'id': '727589',
            'ext': 'mp4',
            'title': "KONOSUBA -God's blessing on this wonderful world! 2 Episode 1 – Give Me Deliverance From This Judicial Injustice!",
            'description': 'md5:cbcf05e528124b0f3a0a419fc805ea7d',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Kadokawa Pictures Inc.',
            'upload_date': '20170118',
            'series': "KONOSUBA -God's blessing on this wonderful world!",
            'season': "KONOSUBA -God's blessing on this wonderful world! 2",
            'season_number': 2,
            'episode': 'Give Me Deliverance From This Judicial Injustice!',
            'episode_number': 1,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
        'only_matching': True,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
        'only_matching': True,
    }, {
        # A description with double quotes
        'url': 'http://www.crunchyroll.com/11eyes/episode-1-piros-jszaka-red-night-535080',
        'info_dict': {
            'id': '535080',
            'ext': 'mp4',
            'title': '11eyes Episode 1 – Piros éjszaka - Red Night',
            'description': 'Kakeru and Yuka are thrown into an alternate nightmarish world they call "Red Night".',
            'uploader': 'Marvelous AQL Inc.',
            'upload_date': '20091021',
        },
        'params': {
            # Just test metadata extraction
            'skip_download': True,
        },
    }, {
        # make sure we can extract an uploader name that's not a link
        'url': 'http://www.crunchyroll.com/hakuoki-reimeiroku/episode-1-dawn-of-the-divine-warriors-606899',
        'info_dict': {
            'id': '606899',
            'ext': 'mp4',
            'title': 'Hakuoki Reimeiroku Episode 1 – Dawn of the Divine Warriors',
            'description': 'Ryunosuke was left to die, but Serizawa-san asked him a simple question "Do you want to live?"',
            'uploader': 'Geneon Entertainment',
            'upload_date': '20120717',
        },
        'params': {
            # just test metadata extraction
            'skip_download': True,
        },
    }, {
        # A video with a vastly different season name compared to the series name
        'url': 'http://www.crunchyroll.com/nyarko-san-another-crawling-chaos/episode-1-test-590532',
        'info_dict': {
            'id': '590532',
            'ext': 'mp4',
            'title': 'Haiyoru! Nyaruani (ONA) Episode 1 – Test',
            'description': 'Mahiro and Nyaruko talk about official certification.',
            'uploader': 'TV TOKYO',
            'upload_date': '20120305',
            'series': 'Nyarko-san: Another Crawling Chaos',
            'season': 'Haiyoru! Nyaruani (ONA)',
        },
        'params': {
            # Just test metadata extraction
            'skip_download': True,
        },
    }]

    # Maps display resolution -> (video_quality, video_format) parameters
    # sent to the RPC API in _real_extract.
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }
  273 
  274     def _decrypt_subtitles(self, data, iv, id):
  275         data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
  276         iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
  277         id = int(id)
  278 
  279         def obfuscate_key_aux(count, modulo, start):
  280             output = list(start)
  281             for _ in range(count):
  282                 output.append(output[-1] + output[-2])
  283             # cut off start values
  284             output = output[2:]
  285             output = list(map(lambda x: x % modulo + 33, output))
  286             return output
  287 
  288         def obfuscate_key(key):
  289             num1 = int(floor(pow(2, 25) * sqrt(6.9)))
  290             num2 = (num1 ^ key) << 5
  291             num3 = key ^ num1
  292             num4 = num3 ^ (num3 >> 3) ^ num2
  293             prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
  294             shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
  295             # Extend 160 Bit hash to 256 Bit
  296             return shaHash + [0] * 12
  297 
  298         key = obfuscate_key(id)
  299 
  300         decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
  301         return zlib.decompress(decrypted_data)
  302 
  303     def _convert_subtitles_to_srt(self, sub_root):
  304         output = ''
  305 
  306         for i, event in enumerate(sub_root.findall('./events/event'), 1):
  307             start = event.attrib['start'].replace('.', ',')
  308             end = event.attrib['end'].replace('.', ',')
  309             text = event.attrib['text'].replace('\\N', '\n')
  310             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
  311         return output
  312 
  313     def _convert_subtitles_to_ass(self, sub_root):
  314         output = ''
  315 
  316         def ass_bool(strvalue):
  317             assvalue = '0'
  318             if strvalue == '1':
  319                 assvalue = '-1'
  320             return assvalue
  321 
  322         output = '[Script Info]\n'
  323         output += 'Title: %s\n' % sub_root.attrib['title']
  324         output += 'ScriptType: v4.00+\n'
  325         output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
  326         output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
  327         output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
  328         output += """
  329 [V4+ Styles]
  330 Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
  331 """
  332         for style in sub_root.findall('./styles/style'):
  333             output += 'Style: ' + style.attrib['name']
  334             output += ',' + style.attrib['font_name']
  335             output += ',' + style.attrib['font_size']
  336             output += ',' + style.attrib['primary_colour']
  337             output += ',' + style.attrib['secondary_colour']
  338             output += ',' + style.attrib['outline_colour']
  339             output += ',' + style.attrib['back_colour']
  340             output += ',' + ass_bool(style.attrib['bold'])
  341             output += ',' + ass_bool(style.attrib['italic'])
  342             output += ',' + ass_bool(style.attrib['underline'])
  343             output += ',' + ass_bool(style.attrib['strikeout'])
  344             output += ',' + style.attrib['scale_x']
  345             output += ',' + style.attrib['scale_y']
  346             output += ',' + style.attrib['spacing']
  347             output += ',' + style.attrib['angle']
  348             output += ',' + style.attrib['border_style']
  349             output += ',' + style.attrib['outline']
  350             output += ',' + style.attrib['shadow']
  351             output += ',' + style.attrib['alignment']
  352             output += ',' + style.attrib['margin_l']
  353             output += ',' + style.attrib['margin_r']
  354             output += ',' + style.attrib['margin_v']
  355             output += ',' + style.attrib['encoding']
  356             output += '\n'
  357 
  358         output += """
  359 [Events]
  360 Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
  361 """
  362         for event in sub_root.findall('./events/event'):
  363             output += 'Dialogue: 0'
  364             output += ',' + event.attrib['start']
  365             output += ',' + event.attrib['end']
  366             output += ',' + event.attrib['style']
  367             output += ',' + event.attrib['name']
  368             output += ',' + event.attrib['margin_l']
  369             output += ',' + event.attrib['margin_r']
  370             output += ',' + event.attrib['margin_v']
  371             output += ',' + event.attrib['effect']
  372             output += ',' + event.attrib['text']
  373             output += '\n'
  374 
  375         return output
  376 
  377     def _extract_subtitles(self, subtitle):
  378         sub_root = compat_etree_fromstring(subtitle)
  379         return [{
  380             'ext': 'srt',
  381             'data': self._convert_subtitles_to_srt(sub_root),
  382         }, {
  383             'ext': 'ass',
  384             'data': self._convert_subtitles_to_ass(sub_root),
  385         }]
  386 
    def _get_subtitles(self, video_id, webpage):
        """Collect all subtitle tracks referenced in the player webpage.

        Returns a dict mapping language code -> list of subtitle variants
        (srt and ass), as produced by _extract_subtitles. Tracks that fail
        to download or decrypt are silently skipped (best effort).
        """
        subtitles = {}
        # Each subtitle script is advertised in the markup via its ssid.
        for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
            sub_doc = self._call_rpc_api(
                'Subtitle_GetXml', video_id,
                'Downloading subtitles for ' + sub_name, data={
                    'subtitle_script_id': sub_id,
                })
            if sub_doc is None:
                continue
            sid = sub_doc.get('id')
            iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
            data = xpath_text(sub_doc, 'data', 'subtitle data')
            if not sid or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, sid).decode('utf-8')
            # The language code is embedded in the decrypted XML itself.
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles
  408 
    def _real_extract(self, url):
        """Extract formats, subtitles and metadata for a single episode."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        # Mobile pages carry a canonical link pointing at the desktop page.
        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(
            self._add_skip_wall(webpage_url), video_id,
            headers=self.geo_verification_headers())
        # A trailer notice means only a trailer is available: abort early.
        note_m = self._html_search_regex(
            r'<div class="showmedia-trailer-notice">(.+?)</div>',
            webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        # Surface error messages embedded as JSON in the messaging box.
        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        if 'To view this, please log in to verify you are 18 or older.' in webpage:
            self.raise_login_required()

        video_title = self._html_search_regex(
            r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
            webpage, 'video_title')
        # Collapse runs of spaces left over by the markup.
        video_title = re.sub(r' {2,}', ' ', video_title)
        # Description is embedded as JSON in an inline script.
        video_description = self._parse_json(self._html_search_regex(
            r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
            webpage, 'description', default='{}'), video_id).get('description')
        if video_description:
            video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
        video_upload_date = self._html_search_regex(
            [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
            webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(
            # try looking for both an uploader that's a link and one that's not
            [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
            webpage, 'video_uploader', fatal=False)

        # Determine available resolutions, skipping formats gated behind a
        # free-trial link; fall back to looser patterns if none were found.
        available_fmts = []
        for a, fmt in re.findall(r'(<a[^>]+token=["\']showmedia\.([0-9]{3,4})p["\'][^>]+>)', webpage):
            attrs = extract_attributes(a)
            href = attrs.get('href')
            if href and '/freetrial' in href:
                continue
            available_fmts.append(fmt)
        if not available_fmts:
            for p in (r'token=["\']showmedia\.([0-9]{3,4})p"', r'showmedia\.([0-9]{3,4})p'):
                available_fmts = re.findall(p, webpage)
                if available_fmts:
                    break
        video_encode_ids = []
        formats = []
        for fmt in available_fmts:
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            stream_infos = []
            # Query both RPC endpoints; either may yield a stream_info node.
            streamdata = self._call_rpc_api(
                'VideoPlayer_GetStandardConfig', video_id,
                'Downloading media info for %s' % video_format, data={
                    'media_id': video_id,
                    'video_format': stream_format,
                    'video_quality': stream_quality,
                    'current_page': url,
                })
            if streamdata is not None:
                stream_info = streamdata.find('./{default}preload/stream_info')
                if stream_info is not None:
                    stream_infos.append(stream_info)
            stream_info = self._call_rpc_api(
                'VideoEncode_GetStreamInfo', video_id,
                'Downloading stream info for %s' % video_format, data={
                    'media_id': video_id,
                    'video_format': stream_format,
                    'video_encode_quality': stream_quality,
                })
            if stream_info is not None:
                stream_infos.append(stream_info)
            for stream_info in stream_infos:
                # Deduplicate streams across the two endpoints by encode id.
                video_encode_id = xpath_text(stream_info, './video_encode_id')
                if video_encode_id in video_encode_ids:
                    continue
                video_encode_ids.append(video_encode_id)

                video_file = xpath_text(stream_info, './file')
                if not video_file:
                    continue
                # An absolute http(s) file is an HLS manifest.
                if video_file.startswith('http'):
                    formats.extend(self._extract_m3u8_formats(
                        video_file, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False))
                    continue

                video_url = xpath_text(stream_info, './host')
                if not video_url:
                    continue
                metadata = stream_info.find('./metadata')
                format_info = {
                    'format': video_format,
                    'height': int_or_none(xpath_text(metadata, './height')),
                    'width': int_or_none(xpath_text(metadata, './width')),
                }

                # fplive.net RTMP hosts expose an equivalent direct HTTP URL
                # on the CDN; prefer it when it actually resolves.
                if '.fplive.net/' in video_url:
                    video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
                    parsed_video_url = compat_urlparse.urlparse(video_url)
                    direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
                        netloc='v.lvlt.crcdn.net',
                        path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
                    if self._is_valid_url(direct_video_url, video_id, video_format):
                        format_info.update({
                            'format_id': 'http-' + video_format,
                            'url': direct_video_url,
                        })
                        formats.append(format_info)
                        continue

                # Fall back to plain RTMP.
                format_info.update({
                    'format_id': 'rtmp-' + video_format,
                    'url': video_url,
                    'play_path': video_file,
                    'ext': 'flv',
                })
                formats.append(format_info)
        self._sort_formats(formats, ('height', 'width', 'tbr', 'fps'))

        metadata = self._call_rpc_api(
            'VideoPlayer_GetMediaMetadata', video_id,
            note='Downloading media info', data={
                'media_id': video_id,
            })

        subtitles = self.extract_subtitles(video_id, webpage)

        # The webpage provides more accurate series data than series_title
        # from the XML metadata (see the Nyarko-san test case above).
        series = self._html_search_regex(
            r'(?s)<h\d[^>]+\bid=["\']showmedia_about_episode_num[^>]+>(.+?)</h\d',
            webpage, 'series', fatal=False)
        season = xpath_text(metadata, 'series_title')

        episode = xpath_text(metadata, 'episode_title')
        episode_number = int_or_none(xpath_text(metadata, 'episode_number'))

        season_number = int_or_none(self._search_regex(
            r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
            webpage, 'season number', default=None))

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': xpath_text(metadata, 'episode_image_url'),
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'series': series,
            'season': season,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'subtitles': subtitles,
            'formats': formats,
        }
  579 
  580 
  581 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
  582     IE_NAME = 'crunchyroll:playlist'
  583     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
  584 
  585     _TESTS = [{
  586         'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
  587         'info_dict': {
  588             'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
  589             'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
  590         },
  591         'playlist_count': 13,
  592     }, {
  593         # geo-restricted (US), 18+ maturity wall, non-premium available
  594         'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
  595         'info_dict': {
  596             'id': 'cosplay-complex-ova',
  597             'title': 'Cosplay Complex OVA'
  598         },
  599         'playlist_count': 3,
  600         'skip': 'Georestricted',
  601     }, {
  602         # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
  603         'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
  604         'only_matching': True,
  605     }]
  606 
  607     def _real_extract(self, url):
  608         show_id = self._match_id(url)
  609 
  610         webpage = self._download_webpage(
  611             self._add_skip_wall(url), show_id,
  612             headers=self.geo_verification_headers())
  613         title = self._html_search_regex(
  614             r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
  615             webpage, 'title')
  616         episode_paths = re.findall(
  617             r'(?s)<li id="showview_videos_media_(\d+)"[^>]+>.*?<a href="([^"]+)"',
  618             webpage)
  619         entries = [
  620             self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll', ep_id)
  621             for ep_id, ep in episode_paths
  622         ]
  623         entries.reverse()
  624 
  625         return {
  626             '_type': 'playlist',
  627             'id': show_id,
  628             'title': title,
  629             'entries': entries,
  630         }

Generated by cgit