-
This commit is contained in:
@@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .provider import Provider
|
||||
|
||||
|
||||
__all__ = ('Provider',)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,18 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .player_client import YouTubePlayerClient
|
||||
|
||||
|
||||
__all__ = (
|
||||
'YouTubePlayerClient',
|
||||
)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,313 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .request_client import YouTubeRequestClient
|
||||
from ..youtube_exceptions import InvalidGrant, LoginException
|
||||
from ...kodion import logging
|
||||
|
||||
|
||||
class YouTubeLoginClient(YouTubeRequestClient):
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
DOMAIN_SUFFIX = '.apps.googleusercontent.com'
|
||||
DEVICE_CODE_URL = 'https://accounts.google.com/o/oauth2/device/code'
|
||||
REVOKE_URL = 'https://accounts.google.com/o/oauth2/revoke'
|
||||
TOKEN_URL = 'https://www.googleapis.com/oauth2/v4/token'
|
||||
TOKEN_TYPES = {
|
||||
0: 'tv',
|
||||
'tv': 0,
|
||||
1: 'user',
|
||||
'user': 1,
|
||||
2: 'vr',
|
||||
'vr': 2,
|
||||
3: 'dev',
|
||||
'dev': 3,
|
||||
}
|
||||
|
||||
_configs = {
|
||||
'dev': {},
|
||||
'user': {},
|
||||
'tv': {},
|
||||
'vr': {},
|
||||
}
|
||||
_access_tokens = {
|
||||
'dev': None,
|
||||
'user': None,
|
||||
'tv': None,
|
||||
'vr': None,
|
||||
}
|
||||
_initialised = False
|
||||
_logged_in = False
|
||||
|
||||
def __init__(self,
|
||||
configs=None,
|
||||
access_tokens=None,
|
||||
**kwargs):
|
||||
super(YouTubeLoginClient, self).__init__(
|
||||
exc_type=LoginException,
|
||||
**kwargs
|
||||
)
|
||||
YouTubeLoginClient.init(configs)
|
||||
self.set_access_token(access_tokens)
|
||||
self.initialised = any(self._configs.values())
|
||||
|
||||
@classmethod
|
||||
def init(cls, configs=None, **_kwargs):
|
||||
_configs = cls._configs
|
||||
if not configs:
|
||||
return
|
||||
for config_type, config in configs.items():
|
||||
if config_type in _configs:
|
||||
_configs[config_type] = config
|
||||
|
||||
def reinit(self, **kwargs):
|
||||
super(YouTubeLoginClient, self).reinit(**kwargs)
|
||||
|
||||
def set_access_token(self, access_tokens=None):
|
||||
existing_access_tokens = type(self)._access_tokens
|
||||
if access_tokens:
|
||||
token_status = 0
|
||||
for token_type, token in existing_access_tokens.items():
|
||||
if token_type in access_tokens:
|
||||
token = access_tokens[token_type]
|
||||
existing_access_tokens[token_type] = token
|
||||
if token or token_type == 'dev':
|
||||
token_status |= 1
|
||||
else:
|
||||
token_status |= 2
|
||||
|
||||
self.logged_in = (
|
||||
'partially'
|
||||
if token_status & 2 else
|
||||
'fully'
|
||||
if token_status & 1 else
|
||||
False
|
||||
)
|
||||
self.log.info('User is %s logged in', self.logged_in or 'not')
|
||||
else:
|
||||
for token_type in existing_access_tokens:
|
||||
existing_access_tokens[token_type] = None
|
||||
self.logged_in = False
|
||||
self.log.info('User is not logged in')
|
||||
|
||||
@property
|
||||
def initialised(self):
|
||||
return self._initialised
|
||||
|
||||
@initialised.setter
|
||||
def initialised(self, value):
|
||||
type(self)._initialised = value
|
||||
|
||||
@property
|
||||
def logged_in(self):
|
||||
return self._logged_in
|
||||
|
||||
@logged_in.setter
|
||||
def logged_in(self, value):
|
||||
type(self)._logged_in = value
|
||||
|
||||
@staticmethod
|
||||
def _login_error_hook(**kwargs):
|
||||
json_data = getattr(kwargs['exc'], 'json_data', None)
|
||||
if not json_data or 'error' not in json_data:
|
||||
return None, None, None, None, LoginException
|
||||
if json_data['error'] == 'authorization_pending':
|
||||
return None, None, None, json_data, False
|
||||
if (json_data['error'] == 'invalid_grant'
|
||||
and json_data.get('code') == 400):
|
||||
return None, None, None, json_data, InvalidGrant(json_data)
|
||||
return None, None, None, json_data, LoginException(json_data)
|
||||
|
||||
def revoke(self, refresh_token):
|
||||
# https://developers.google.com/youtube/v3/guides/auth/devices
|
||||
headers = {'Host': 'accounts.google.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
|
||||
' AppleWebKit/537.36 (KHTML, like Gecko)'
|
||||
' Chrome/61.0.3163.100 Safari/537.36',
|
||||
'Content-Type': 'application/x-www-form-urlencoded'}
|
||||
|
||||
post_data = {'token': refresh_token}
|
||||
|
||||
self.request(
|
||||
self.REVOKE_URL,
|
||||
method='POST',
|
||||
data=post_data,
|
||||
headers=headers,
|
||||
response_hook=self._response_hook_json,
|
||||
error_hook=self._login_error_hook,
|
||||
error_title='Logout failed - Refresh token revocation error',
|
||||
raise_exc=True,
|
||||
)
|
||||
|
||||
def refresh_token(self, token_type, refresh_token=None):
|
||||
login_type = self.TOKEN_TYPES.get(token_type)
|
||||
config = self._configs.get(login_type)
|
||||
if config:
|
||||
client_id = config.get('id')
|
||||
client_secret = config.get('secret')
|
||||
else:
|
||||
return None
|
||||
if not client_id or not client_secret or not refresh_token:
|
||||
return None
|
||||
|
||||
# https://developers.google.com/youtube/v3/guides/auth/devices
|
||||
headers = {'Host': 'www.googleapis.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
|
||||
' AppleWebKit/537.36 (KHTML, like Gecko)'
|
||||
' Chrome/61.0.3163.100 Safari/537.36',
|
||||
'Content-Type': 'application/x-www-form-urlencoded'}
|
||||
|
||||
post_data = {'client_id': client_id,
|
||||
'client_secret': client_secret,
|
||||
'refresh_token': refresh_token,
|
||||
'grant_type': 'refresh_token'}
|
||||
|
||||
client_id.replace(self.DOMAIN_SUFFIX, '')
|
||||
log_info = ('Login type: {login_type!r}',
|
||||
'client_id: {client_id!r}',
|
||||
'client_secret: {client_secret!r}')
|
||||
log_params = {
|
||||
'login_type': login_type,
|
||||
'client_id': '...',
|
||||
'client_secret': '...',
|
||||
}
|
||||
if len(client_id) > 11:
|
||||
log_params['client_id'] = '...'.join((
|
||||
client_id[:3],
|
||||
client_id[-5:],
|
||||
))
|
||||
if len(client_secret) > 9:
|
||||
log_params['client_secret'] = '...'.join((
|
||||
client_secret[:3],
|
||||
client_secret[-3:],
|
||||
))
|
||||
self.log.debug(('Refresh token:',) + log_info, **log_params)
|
||||
|
||||
json_data = self.request(
|
||||
self.TOKEN_URL,
|
||||
method='POST',
|
||||
data=post_data,
|
||||
headers=headers,
|
||||
response_hook=self._response_hook_json,
|
||||
error_hook=self._login_error_hook,
|
||||
error_title='Login failed - Refresh token grant error',
|
||||
error_info=log_info,
|
||||
raise_exc=True,
|
||||
**log_params
|
||||
)
|
||||
return json_data
|
||||
|
||||
def request_access_token(self, token_type, code=None):
|
||||
login_type = self.TOKEN_TYPES.get(token_type)
|
||||
config = self._configs.get(login_type)
|
||||
if config:
|
||||
client_id = config.get('id')
|
||||
client_secret = config.get('secret')
|
||||
else:
|
||||
return None
|
||||
if not client_id or not client_secret or not code:
|
||||
return None
|
||||
|
||||
# https://developers.google.com/youtube/v3/guides/auth/devices
|
||||
headers = {'Host': 'www.googleapis.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
|
||||
' AppleWebKit/537.36 (KHTML, like Gecko)'
|
||||
' Chrome/61.0.3163.100 Safari/537.36',
|
||||
'Content-Type': 'application/x-www-form-urlencoded'}
|
||||
|
||||
post_data = {'client_id': client_id,
|
||||
'client_secret': client_secret,
|
||||
'code': code,
|
||||
'grant_type': 'http://oauth.net/grant_type/device/1.0'}
|
||||
|
||||
client_id.replace(self.DOMAIN_SUFFIX, '')
|
||||
log_info = ('Login type: {login_type!r}',
|
||||
'client_id: {client_id!r}',
|
||||
'client_secret: {client_secret!r}')
|
||||
log_params = {
|
||||
'login_type': login_type,
|
||||
'client_id': '...',
|
||||
'client_secret': '...',
|
||||
}
|
||||
if len(client_id) > 11:
|
||||
log_params['client_id'] = '...'.join((
|
||||
client_id[:3],
|
||||
client_id[-5:],
|
||||
))
|
||||
if len(client_secret) > 9:
|
||||
log_params['client_secret'] = '...'.join((
|
||||
client_secret[:3],
|
||||
client_secret[-3:],
|
||||
))
|
||||
self.log.debug(('Access token request:',) + log_info, **log_params)
|
||||
|
||||
json_data = self.request(
|
||||
self.TOKEN_URL,
|
||||
method='POST',
|
||||
data=post_data,
|
||||
headers=headers,
|
||||
response_hook=self._response_hook_json,
|
||||
error_hook=self._login_error_hook,
|
||||
error_title='Login failed - Access token request error',
|
||||
error_info=log_info,
|
||||
raise_exc=True,
|
||||
**log_params
|
||||
)
|
||||
return json_data
|
||||
|
||||
def request_device_and_user_code(self, token_type):
|
||||
login_type = self.TOKEN_TYPES.get(token_type)
|
||||
config = self._configs.get(login_type)
|
||||
if config:
|
||||
client_id = config.get('id')
|
||||
else:
|
||||
return None
|
||||
if not client_id:
|
||||
return None
|
||||
|
||||
# https://developers.google.com/youtube/v3/guides/auth/devices
|
||||
headers = {'Host': 'accounts.google.com',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
|
||||
' AppleWebKit/537.36 (KHTML, like Gecko)'
|
||||
' Chrome/61.0.3163.100 Safari/537.36',
|
||||
'Content-Type': 'application/x-www-form-urlencoded'}
|
||||
|
||||
post_data = {'client_id': client_id,
|
||||
'scope': 'https://www.googleapis.com/auth/youtube'}
|
||||
|
||||
client_id.replace(self.DOMAIN_SUFFIX, '')
|
||||
log_info = ('Login type: {login_type!r}',
|
||||
'client_id: {client_id!r}')
|
||||
log_params = {
|
||||
'login_type': login_type,
|
||||
'client_id': '...',
|
||||
}
|
||||
if len(client_id) > 11:
|
||||
log_params['client_id'] = '...'.join((
|
||||
client_id[:3],
|
||||
client_id[-5:],
|
||||
))
|
||||
self.log.debug(('Device/user code request:',) + log_info, **log_params)
|
||||
|
||||
json_data = self.request(
|
||||
self.DEVICE_CODE_URL,
|
||||
method='POST',
|
||||
data=post_data,
|
||||
headers=headers,
|
||||
response_hook=self._response_hook_json,
|
||||
error_hook=self._login_error_hook,
|
||||
error_title='Login failed - Device/user code request error',
|
||||
error_info=log_info,
|
||||
raise_exc=True,
|
||||
**log_params
|
||||
)
|
||||
return json_data
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,593 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Copyright (C) 2017-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import os
|
||||
|
||||
from .request_client import YouTubeRequestClient
|
||||
from ...kodion import logging
|
||||
from ...kodion.compatibility import parse_qs, urlencode, urlsplit, xbmcvfs
|
||||
from ...kodion.constants import (
|
||||
PLAY_PROMPT_SUBTITLES,
|
||||
TEMP_PATH,
|
||||
TRANSLATION_LANGUAGES,
|
||||
)
|
||||
from ...kodion.utils.file_system import make_dirs
|
||||
|
||||
|
||||
SUBTITLE_OPTIONS = {
|
||||
'none': 0,
|
||||
'prompt': 1,
|
||||
'preferred': 2,
|
||||
'fallback': 4,
|
||||
'no_asr': 8,
|
||||
'all': 16,
|
||||
}
|
||||
|
||||
SUBTITLE_SELECTIONS = {
|
||||
0: SUBTITLE_OPTIONS['none'],
|
||||
1: SUBTITLE_OPTIONS['all']
|
||||
+ SUBTITLE_OPTIONS['prompt'],
|
||||
2: SUBTITLE_OPTIONS['preferred']
|
||||
+ SUBTITLE_OPTIONS['fallback'],
|
||||
3: SUBTITLE_OPTIONS['preferred']
|
||||
+ SUBTITLE_OPTIONS['fallback']
|
||||
+ SUBTITLE_OPTIONS['no_asr'],
|
||||
4: SUBTITLE_OPTIONS['preferred']
|
||||
+ SUBTITLE_OPTIONS['no_asr'],
|
||||
5: SUBTITLE_OPTIONS['all'],
|
||||
'none': 0,
|
||||
'prompt': 1,
|
||||
'preferred_fallback_asr': 2,
|
||||
'preferred_fallback': 3,
|
||||
'preferred': 4,
|
||||
'all': 5,
|
||||
}
|
||||
|
||||
|
||||
class Subtitles(YouTubeRequestClient):
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
BASE_PATH = make_dirs(TEMP_PATH)
|
||||
|
||||
FORMATS = {
|
||||
# '_default': None,
|
||||
# '_fallback': None,
|
||||
'srt': {
|
||||
# 'mime_type': 'application/x-subrip',
|
||||
# Fake mimetype to allow ISA to decode as WebVTT
|
||||
'mime_type': 'text/vtt',
|
||||
'extension': 'srt',
|
||||
# Fake @codecs to allow ISA to decode as WebVTT
|
||||
'codec': 'wvtt',
|
||||
},
|
||||
'vtt': {
|
||||
'mime_type': 'text/vtt',
|
||||
'extension': 'vtt',
|
||||
'codec': 'wvtt',
|
||||
},
|
||||
'ttml': {
|
||||
'mime_type': 'application/ttml+xml',
|
||||
'extension': 'ttml',
|
||||
'codec': 'ttml',
|
||||
},
|
||||
}
|
||||
|
||||
def __init__(self, context, video_id, use_mpd=None):
|
||||
super(Subtitles, self).__init__(context=context)
|
||||
|
||||
self.video_id = video_id
|
||||
|
||||
self.defaults = None
|
||||
self.headers = None
|
||||
self.renderer = None
|
||||
self.caption_tracks = None
|
||||
self.translation_langs = None
|
||||
|
||||
settings = context.get_settings()
|
||||
self.pre_download = settings.subtitle_download()
|
||||
self.sub_selection = settings.get_subtitle_selection()
|
||||
stream_features = settings.stream_features()
|
||||
if use_mpd is None:
|
||||
use_mpd = settings.use_mpd_videos()
|
||||
|
||||
use_isa = not self.pre_download and use_mpd
|
||||
self.use_isa = use_isa
|
||||
if use_isa:
|
||||
if ('ttml' in stream_features
|
||||
and context.inputstream_adaptive_capabilities('ttml')):
|
||||
self.FORMATS['_default'] = 'ttml'
|
||||
self.FORMATS['_fallback'] = 'ttml'
|
||||
if context.inputstream_adaptive_capabilities('vtt'):
|
||||
if 'vtt' in stream_features:
|
||||
self.FORMATS.setdefault('_default', 'vtt')
|
||||
self.FORMATS['_fallback'] = 'vtt'
|
||||
else:
|
||||
self.FORMATS.setdefault('_default', 'srt')
|
||||
self.FORMATS['_fallback'] = 'srt'
|
||||
else:
|
||||
if ('vtt' in stream_features
|
||||
and context.get_system_version().compatible(20)):
|
||||
self.FORMATS['_default'] = 'vtt'
|
||||
self.FORMATS['_fallback'] = 'vtt'
|
||||
else:
|
||||
self.FORMATS['_default'] = 'srt'
|
||||
self.FORMATS['_fallback'] = 'srt'
|
||||
|
||||
kodi_sub_lang = context.get_subtitle_language()
|
||||
plugin_lang = settings.get_language()
|
||||
if not kodi_sub_lang and plugin_lang:
|
||||
self.preferred_lang = (plugin_lang,)
|
||||
elif kodi_sub_lang:
|
||||
if not plugin_lang:
|
||||
self.preferred_lang = (kodi_sub_lang,)
|
||||
elif (plugin_lang.partition('-')[0]
|
||||
!= kodi_sub_lang.partition('-')[0]):
|
||||
self.preferred_lang = (kodi_sub_lang, plugin_lang)
|
||||
else:
|
||||
self.preferred_lang = (kodi_sub_lang,)
|
||||
else:
|
||||
self.preferred_lang = ('en',)
|
||||
|
||||
self.prompt_override = bool(
|
||||
context.get_ui().pop_property(PLAY_PROMPT_SUBTITLES)
|
||||
)
|
||||
|
||||
def load(self, captions, headers=None):
|
||||
if headers:
|
||||
headers.pop('Authorization', None)
|
||||
headers.pop('Content-Length', None)
|
||||
headers.pop('Content-Type', None)
|
||||
self.headers = headers
|
||||
|
||||
self.renderer = captions.get('playerCaptionsTracklistRenderer', {})
|
||||
self.caption_tracks = self.renderer.get('captionTracks', [])
|
||||
self.translation_langs = self.renderer.get('translationLanguages', [])
|
||||
self.translation_langs.extend(TRANSLATION_LANGUAGES)
|
||||
|
||||
try:
|
||||
default_audio = self.renderer.get('defaultAudioTrackIndex')
|
||||
default_audio = self.renderer.get('audioTracks')[default_audio]
|
||||
except (IndexError, TypeError):
|
||||
default_audio = None
|
||||
|
||||
self.defaults = {
|
||||
'default_lang': 'und',
|
||||
'original_lang': 'und',
|
||||
'is_asr': False,
|
||||
'base': None,
|
||||
'base_lang': None
|
||||
}
|
||||
if default_audio is None:
|
||||
return
|
||||
|
||||
default_caption = self.renderer.get(
|
||||
'defaultTranslationSourceTrackIndices', [None]
|
||||
)[0]
|
||||
|
||||
if default_caption is None and default_audio.get('hasDefaultTrack'):
|
||||
default_caption = default_audio.get('defaultCaptionTrackIndex')
|
||||
|
||||
if default_caption is None:
|
||||
try:
|
||||
default_caption = default_audio.get('captionTrackIndices')[0]
|
||||
except (IndexError, TypeError):
|
||||
default_caption = 0
|
||||
|
||||
try:
|
||||
default_caption = self.caption_tracks[default_caption] or {}
|
||||
except IndexError:
|
||||
return
|
||||
|
||||
asr_caption = [
|
||||
track
|
||||
for track in self.caption_tracks
|
||||
if track.get('kind') == 'asr'
|
||||
]
|
||||
original_caption = asr_caption and asr_caption[0] or {}
|
||||
|
||||
self.defaults = {
|
||||
'default_lang': default_caption.get('languageCode') or 'und',
|
||||
'original_lang': original_caption.get('languageCode') or 'und',
|
||||
'is_asr': default_caption.get('kind') == 'asr',
|
||||
'base': None,
|
||||
'base_lang': None,
|
||||
}
|
||||
if original_caption.get('isTranslatable'):
|
||||
self.defaults['base'] = original_caption
|
||||
self.defaults['base_lang'] = self.defaults['original_lang']
|
||||
elif default_caption.get('isTranslatable'):
|
||||
self.defaults['base'] = default_caption
|
||||
self.defaults['base_lang'] = self.defaults['default_lang']
|
||||
else:
|
||||
for track in self.caption_tracks:
|
||||
if track.get('isTranslatable'):
|
||||
base_lang = track.get('languageCode')
|
||||
if base_lang:
|
||||
self.defaults['base'] = track
|
||||
self.defaults['base_lang'] = base_lang
|
||||
break
|
||||
|
||||
def get_lang_details(self):
|
||||
return {
|
||||
'default': self.defaults['default_lang'],
|
||||
'original': self.defaults['original_lang'],
|
||||
'is_asr': self.defaults['is_asr'],
|
||||
}
|
||||
|
||||
def get_subtitles(self):
|
||||
selection = self.sub_selection
|
||||
|
||||
if self.prompt_override or selection == SUBTITLE_SELECTIONS['prompt']:
|
||||
return self._prompt()
|
||||
|
||||
if selection == SUBTITLE_SELECTIONS['none']:
|
||||
return None
|
||||
|
||||
if selection == SUBTITLE_SELECTIONS['all']:
|
||||
return self.get_all()
|
||||
|
||||
selected_options = SUBTITLE_SELECTIONS[selection]
|
||||
|
||||
allowed_langs = []
|
||||
|
||||
preferred_lang = self.preferred_lang
|
||||
for lang in preferred_lang:
|
||||
allowed_langs.append(lang)
|
||||
if '-' in lang:
|
||||
allowed_langs.append(lang.partition('-')[0])
|
||||
|
||||
use_asr = None
|
||||
if selected_options & SUBTITLE_OPTIONS['no_asr']:
|
||||
use_asr = False
|
||||
|
||||
original_lang = self.defaults['original_lang']
|
||||
if selected_options & SUBTITLE_OPTIONS['fallback']:
|
||||
fallback_langs = (original_lang, 'en', 'en-US', 'en-GB')
|
||||
for lang in fallback_langs:
|
||||
if lang not in allowed_langs:
|
||||
allowed_langs.append(lang)
|
||||
allowed_langs.append('ASR')
|
||||
else:
|
||||
fallback_langs = None
|
||||
|
||||
subtitles = {}
|
||||
for lang in allowed_langs:
|
||||
if lang != 'ASR':
|
||||
track_details = self._get_track(lang, use_asr=use_asr)
|
||||
elif subtitles:
|
||||
continue
|
||||
else:
|
||||
track_details = self._get_track(
|
||||
lang=None,
|
||||
use_asr=(use_asr is None or None),
|
||||
fallbacks=fallback_langs,
|
||||
)
|
||||
|
||||
track, track_lang, track_language, track_kind = track_details
|
||||
if not track:
|
||||
continue
|
||||
if track_kind:
|
||||
track_key = '_'.join((track_lang, track_kind))
|
||||
else:
|
||||
track_key = track_lang
|
||||
url, sub_format = self._get_url(track=track, lang=track_lang)
|
||||
if url:
|
||||
subtitles[track_key] = {
|
||||
'default': track_lang in preferred_lang,
|
||||
'original': track_lang == original_lang,
|
||||
'kind': track_kind,
|
||||
'lang': track_lang,
|
||||
'language': track_language,
|
||||
'mime_type': sub_format['mime_type'],
|
||||
'codec': sub_format['codec'],
|
||||
'url': url,
|
||||
}
|
||||
if subtitles and self.use_isa:
|
||||
subtitles['_headers'] = self.headers
|
||||
return subtitles
|
||||
|
||||
def get_all(self):
|
||||
subtitles = {}
|
||||
|
||||
preferred_lang = self.preferred_lang
|
||||
original_lang = self.defaults['original_lang']
|
||||
|
||||
for track in self.caption_tracks:
|
||||
track_lang = track.get('languageCode')
|
||||
track_kind = track.get('kind')
|
||||
track_language = self._get_language_name(track)
|
||||
url, sub_format = self._get_url(track=track)
|
||||
if url:
|
||||
if track_kind:
|
||||
track_key = '_'.join((track_lang, track_kind))
|
||||
else:
|
||||
track_key = track_lang
|
||||
subtitles[track_key] = {
|
||||
'default': track_lang in preferred_lang,
|
||||
'original': track_lang == original_lang,
|
||||
'kind': track_kind,
|
||||
'lang': track_lang,
|
||||
'language': track_language,
|
||||
'mime_type': sub_format['mime_type'],
|
||||
'codec': sub_format['codec'],
|
||||
'url': url,
|
||||
}
|
||||
|
||||
base = self.defaults['base']
|
||||
base_lang = self.defaults['base_lang']
|
||||
if base:
|
||||
for track in self.translation_langs:
|
||||
track_lang = track.get('languageCode')
|
||||
if not track_lang or track_lang in subtitles:
|
||||
continue
|
||||
track_language = self._get_language_name(track)
|
||||
url, sub_format = self._get_url(track=base, lang=track_lang)
|
||||
if url:
|
||||
track_key = '_'.join((base_lang, track_lang))
|
||||
subtitles[track_key] = {
|
||||
'default': track_lang in preferred_lang,
|
||||
'original': track_lang == original_lang,
|
||||
'kind': 'translation',
|
||||
'lang': track_lang,
|
||||
'language': track_language,
|
||||
'mime_type': sub_format['mime_type'],
|
||||
'codec': sub_format['codec'],
|
||||
'url': url,
|
||||
}
|
||||
|
||||
if subtitles and self.use_isa:
|
||||
subtitles['_headers'] = self.headers
|
||||
return subtitles
|
||||
|
||||
def _prompt(self):
|
||||
captions = [
|
||||
(track.get('languageCode'), self._get_language_name(track))
|
||||
for track in self.caption_tracks
|
||||
]
|
||||
translations = [
|
||||
(track.get('languageCode'), self._get_language_name(track))
|
||||
for track in self.translation_langs
|
||||
] if self.defaults['base'] else []
|
||||
num_captions = len(captions)
|
||||
num_translations = len(translations)
|
||||
num_total = num_captions + num_translations
|
||||
|
||||
if not num_total:
|
||||
self.log.debug('No subtitles found for prompt')
|
||||
else:
|
||||
localize = self._context.localize
|
||||
choice = self._context.get_ui().on_select(
|
||||
localize('subtitles.language'),
|
||||
[name for _, name in captions] +
|
||||
[localize('subtitles.translation.x', name)
|
||||
for _, name in translations]
|
||||
)
|
||||
|
||||
if 0 <= choice < num_captions:
|
||||
track = self.caption_tracks[choice]
|
||||
track_kind = track.get('kind')
|
||||
choice = captions[choice - num_captions]
|
||||
elif num_captions <= choice < num_total:
|
||||
track = self.defaults['base']
|
||||
track_kind = 'translation'
|
||||
choice = translations[choice - num_captions]
|
||||
else:
|
||||
self.log.debug('Subtitle selection cancelled')
|
||||
return False
|
||||
|
||||
lang, language = choice
|
||||
self.log.debug('Selected: %r', lang)
|
||||
url, sub_format = self._get_url(track=track, lang=lang)
|
||||
if url:
|
||||
subtitle = {
|
||||
lang: {
|
||||
'default': True,
|
||||
'original': lang == self.defaults['original_lang'],
|
||||
'kind': track_kind,
|
||||
'lang': lang,
|
||||
'language': language,
|
||||
'mime_type': sub_format['mime_type'],
|
||||
'codec': sub_format['codec'],
|
||||
'url': url,
|
||||
},
|
||||
}
|
||||
if self.use_isa:
|
||||
subtitle['_headers'] = self.headers
|
||||
return subtitle
|
||||
return None
|
||||
|
||||
def _get_url(self, track, lang=None):
|
||||
sub_format = self.FORMATS.get('_default')
|
||||
if not sub_format:
|
||||
self.log.error_trace('Invalid subtitle options selected')
|
||||
return None, None
|
||||
|
||||
tlang = None
|
||||
base_lang = track.get('languageCode')
|
||||
kind = track.get('kind')
|
||||
if lang and lang != base_lang:
|
||||
tlang = lang
|
||||
lang = '-'.join((base_lang, tlang))
|
||||
elif kind == 'asr':
|
||||
lang = '-'.join((base_lang, kind))
|
||||
sub_format = self.FORMATS['_fallback']
|
||||
else:
|
||||
lang = base_lang
|
||||
|
||||
download = self.pre_download
|
||||
if download:
|
||||
filename = '.'.join((
|
||||
self.video_id,
|
||||
lang,
|
||||
self.FORMATS[sub_format]['extension']
|
||||
))
|
||||
if not self.BASE_PATH:
|
||||
self.log.error_trace('Unable to access temp directory')
|
||||
return None, None
|
||||
|
||||
file_path = os.path.join(self.BASE_PATH, filename)
|
||||
if xbmcvfs.exists(file_path):
|
||||
self.log.debug(('Use existing subtitle for: {lang!r}',
|
||||
'File: {file}'),
|
||||
lang=lang,
|
||||
file=file_path)
|
||||
return file_path, self.FORMATS[sub_format]
|
||||
|
||||
base_url = self._normalize_url(track.get('baseUrl'))
|
||||
if not base_url:
|
||||
self.log.error_trace('No URL for: %r', lang)
|
||||
return None, None
|
||||
|
||||
subtitle_url = self._set_query_param(
|
||||
base_url,
|
||||
('type', 'track'),
|
||||
('fmt', sub_format),
|
||||
('tlang', tlang),
|
||||
('xosf', None),
|
||||
)
|
||||
self.log.debug(('Found new subtitle for: {lang!r}',
|
||||
'URL: {url}'),
|
||||
lang=lang,
|
||||
url=subtitle_url)
|
||||
|
||||
if not download:
|
||||
return subtitle_url, self.FORMATS[sub_format]
|
||||
|
||||
response = self.request(
|
||||
subtitle_url,
|
||||
headers=self.headers,
|
||||
error_title='Failed to download subtitle for: {sub_lang!r}',
|
||||
sub_lang=lang,
|
||||
)
|
||||
if response is None:
|
||||
return None, None
|
||||
with response:
|
||||
response_text = response.text
|
||||
if not response_text:
|
||||
return None, None
|
||||
|
||||
output = bytearray(self._unescape(response_text),
|
||||
encoding='utf8',
|
||||
errors='ignore')
|
||||
try:
|
||||
with xbmcvfs.File(file_path, 'w') as sub_file:
|
||||
success = sub_file.write(output)
|
||||
except (IOError, OSError):
|
||||
self.log.exception(('Write failed for: {lang!r}',
|
||||
'File: {file}'),
|
||||
lang=lang,
|
||||
file=file_path)
|
||||
if success:
|
||||
return file_path, self.FORMATS[sub_format]
|
||||
return None, None
|
||||
|
||||
def _get_track(self,
|
||||
lang='en',
|
||||
language=None,
|
||||
use_asr=None,
|
||||
fallbacks=None):
|
||||
sel_track = sel_lang = sel_language = sel_kind = None
|
||||
|
||||
for track in self.caption_tracks:
|
||||
track_lang = track.get('languageCode')
|
||||
track_language = self._get_language_name(track)
|
||||
track_kind = track.get('kind')
|
||||
is_asr = track_kind == 'asr'
|
||||
if lang and lang != track_lang:
|
||||
continue
|
||||
if not lang and fallbacks and track_lang not in fallbacks:
|
||||
continue
|
||||
if language is not None:
|
||||
if language == track_language:
|
||||
sel_track = track
|
||||
sel_lang = track_lang
|
||||
sel_language = track_language
|
||||
sel_kind = track_kind
|
||||
break
|
||||
elif (use_asr is False and is_asr) or (use_asr and not is_asr):
|
||||
continue
|
||||
elif (not sel_track
|
||||
or (use_asr is None and sel_kind == 'asr')
|
||||
or (use_asr and is_asr)):
|
||||
sel_track = track
|
||||
sel_lang = track_lang
|
||||
sel_language = track_language
|
||||
sel_kind = track_kind
|
||||
|
||||
if (not sel_track
|
||||
and lang
|
||||
and use_asr is None
|
||||
and self.defaults['base']
|
||||
and lang != self.defaults['base_lang']):
|
||||
for track in self.translation_langs:
|
||||
if lang == track.get('languageCode'):
|
||||
sel_track = self.defaults['base']
|
||||
sel_lang = lang
|
||||
sel_language = self._get_language_name(track)
|
||||
sel_kind = 'translation'
|
||||
break
|
||||
|
||||
if sel_track:
|
||||
return sel_track, sel_lang, sel_language, sel_kind
|
||||
|
||||
self.log.debug('No subtitle for: %r', lang)
|
||||
return None, None, None, None
|
||||
|
||||
@staticmethod
|
||||
def _get_language_name(track):
|
||||
lang_obj = None
|
||||
if 'languageName' in track:
|
||||
lang_obj = track['languageName']
|
||||
elif 'name' in track:
|
||||
lang_obj = track['name']
|
||||
|
||||
if not lang_obj:
|
||||
return None
|
||||
|
||||
lang_name = lang_obj.get('simpleText')
|
||||
if lang_name:
|
||||
return lang_name
|
||||
|
||||
track_name = lang_obj.get('runs')
|
||||
if isinstance(track_name, (list, tuple)) and len(track_name) >= 1:
|
||||
lang_name = track_name[0].get('text')
|
||||
|
||||
return lang_name
|
||||
|
||||
@staticmethod
|
||||
def _set_query_param(url, *pairs):
|
||||
if not url or not pairs:
|
||||
return url
|
||||
|
||||
num_params = len(pairs)
|
||||
if not num_params:
|
||||
return url
|
||||
if not isinstance(pairs[0], (list, tuple)):
|
||||
if num_params >= 2:
|
||||
pairs = zip(*[iter(pairs)] * 2)
|
||||
else:
|
||||
return url
|
||||
|
||||
components = urlsplit(url)
|
||||
query_params = parse_qs(components.query)
|
||||
|
||||
for name, value in pairs:
|
||||
if not name:
|
||||
continue
|
||||
if isinstance(value, (list, tuple)):
|
||||
query_params[name] = value
|
||||
elif value is not None:
|
||||
query_params[name] = [value]
|
||||
elif name in query_params:
|
||||
del query_params[name]
|
||||
|
||||
return components._replace(
|
||||
query=urlencode(query_params, doseq=True)
|
||||
).geturl()
|
||||
@@ -0,0 +1,15 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .resource_manager import ResourceManager
|
||||
from .url_resolver import UrlResolver
|
||||
from .url_to_item_converter import UrlToItemConverter
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,15 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2021 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from . import ratebypass
|
||||
|
||||
|
||||
__all__ = ('ratebypass',)
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,461 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Copyright (C) 2012-2021 https://github.com/pytube/pytube/
|
||||
SPDX-License-Identifier: Unlicense
|
||||
|
||||
Copyright (C) 2021 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from ....kodion import logging
|
||||
|
||||
|
||||
def throttling_reverse(arr):
    """Reverse *arr* in place.

    The caller's list object must be mutated (not rebound), so the
    reversed order is written back into the existing list.
    """
    arr[:] = arr[::-1]
|
||||
|
||||
|
||||
def throttling_push(d, e):
    """Append *e* to the end of list *d* (JS ``Array.push`` equivalent)."""
    d[len(d):] = [e]
|
||||
|
||||
|
||||
def throttling_mod_func(d, e):
    """Remap *e* into a valid index of list *d*.

    Mirrors the JS expression ``e = (e % d.length + d.length) % d.length``,
    which normalises any integer (including negatives) into the range
    ``0 .. len(d) - 1``.
    """
    length = len(d)
    return (e % length + length) % length
|
||||
|
||||
|
||||
def throttling_unshift(d, e):
    """Rotate list *d* right by *e* positions, in place.

    JS equivalent:
        for(e=(e%d.length+d.length)%d.length;e--;)d.unshift(d.pop())
    """
    e = throttling_mod_func(d, e)
    # Slice assignment mutates the caller's list object in place.
    d[:] = d[-e:] + d[:-e]
|
||||
|
||||
|
||||
def throttling_cipher_helper(d, e, h):
    """This ciphers d with e to generate a new list.

    :param list d:
        Mutable list of characters to cipher; modified in place.
    :param str e:
        Cipher key; its characters seed the ``this`` list below.
    :param list h:
        The alphabet used for index lookups.

    In the javascript, the operation is as follows:
        var h = [A-Za-z0-9-_], f = 96; // simplified from switch-case loop
        d.forEach(
            function(l,m,n){
                this.push(
                    n[m]=h[
                        (h.indexOf(l)-h.indexOf(this[m])+m-32+f--)%h.length
                    ]
                )
            },
            e.split("")
        )
    """
    # 'f' is decremented once per processed character, as in the JS.
    f = 96
    # by naming it "this" we can more closely reflect the js
    this = list(e)

    # This is so we don't run into weirdness with enumerate while
    # we change the input list
    copied_list = d[:]

    for m, l in enumerate(copied_list):
        bracket_val = (h.index(l) - h.index(this[m]) + m - 32 + f) % len(h)
        # Each ciphered character is also appended to 'this', so once m
        # exceeds the key length, this[m] reads previously ciphered
        # output rather than key characters — exactly as the JS does.
        this.append(
            h[bracket_val]
        )
        d[m] = h[bracket_val]
        f -= 1
|
||||
|
||||
|
||||
def throttling_cipher_function_a(d, e):
    """Cipher *d* in place with key *e* using the 'A-Za-z0-9-_' alphabet.

    The JS builds this alphabet with a switch-case loop; the generated
    result is inlined here.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
    throttling_cipher_helper(d, e, list(alphabet))
|
||||
|
||||
|
||||
def throttling_cipher_function_b(d, e):
    """Cipher *d* in place with key *e* using the '0-9a-zA-Z-_' alphabet.

    This variant is identifiable from the "case 65" pattern in the JS
    function body.
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'
    throttling_cipher_helper(d, e, list(alphabet))
|
||||
|
||||
|
||||
def throttling_nested_splice(d, e):
    """Exchange d[0] and d[e] via nested splices, in place.

    In the javascript, the operation is as follows:
        function(d,e){
            e=(e%d.length+d.length)%d.length;
            d.splice(
                0,
                1,
                d.splice(
                    e,
                    1,
                    d[0]
                )[0]
            )
        }
    Observed behaviour is a plain swap of elements 0 and e, but the
    original nested-splice sequence is replayed in case an edge case
    was not considered.
    """
    e = throttling_mod_func(d, e)
    # Inner splice: put d[0] at index e, capturing the displaced element.
    displaced = js_splice(
        d,
        e,
        1,
        d[0]
    )
    # Outer splice: put the displaced element back at index 0.
    js_splice(
        d,
        0,
        1,
        displaced[0]
    )
|
||||
|
||||
|
||||
def throttling_prepend(d, e):
    """Move the last *e* elements of *d* to the front, in place.

    In the javascript, the operation is as follows:
        function(d,e){
            e=(e%d.length+d.length)%d.length;
            d.splice(-e).reverse().forEach(
                function(f){
                    d.unshift(f)
                }
            )
        }
    """
    original_length = len(d)

    # Normalise e, then rotate via slice assignment so the caller's
    # list object is mutated rather than replaced.
    e = throttling_mod_func(d, e)
    d[:] = d[-e:] + d[:-e]

    # Sanity check: a rotation must never change the list length.
    assert len(d) == original_length
|
||||
|
||||
|
||||
def throttling_swap(d, e):
    """Swap positions of the 0'th and e'th elements in-place."""
    e = throttling_mod_func(d, e)
    d[0], d[e] = d[e], d[0]
|
||||
|
||||
|
||||
def js_splice(arr, start, delete_count=None, *items):
    """Implementation of javascript's Array.prototype.splice.

    Removes ``delete_count`` elements of ``arr`` starting at ``start``,
    inserts ``items`` in their place, and returns the removed elements.
    ``arr`` is modified in place.

    :param list arr:
        Array to splice
    :param int start:
        Index at which to start changing the array. Negative values
        count back from the end; out-of-range values are clamped.
    :param int delete_count:
        Number of elements to delete from the array. ``None`` (omitted)
        deletes through to the end of the array; an explicit ``0``
        deletes nothing, matching JS.
    :param items:
        Items to add to the array
    Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501
    """
    length = len(arr)

    # Normalise start per the JS spec: clamp into [0, len(arr)],
    # counting negative values back from the end of the array.
    try:
        if start > length:
            start = length
        elif start < 0:
            # JS: start = max(length + start, 0). The previous
            # "length - start" produced an out-of-range index for
            # negative starts.
            start = max(length + start, 0)
    except TypeError:
        # Non-integer start values are treated as 0 in js
        start = 0

    # JS: an omitted delete_count deletes to the end of the array and
    # larger values are clamped; negative values delete nothing. An
    # explicit 0 must NOT be treated like "omitted".
    if delete_count is None or delete_count > length - start:
        delete_count = length - start
    elif delete_count < 0:
        delete_count = 0

    deleted_elements = arr[start:start + delete_count]

    # Splice appropriately: rebuild around the deleted span, inserting
    # the new items, and write back through a slice assignment so the
    # caller's list object is mutated.
    arr[:] = arr[:start] + list(items) + arr[start + delete_count:]

    return deleted_elements
|
||||
|
||||
|
||||
def throttling_splice(d, e):
    """Delete the element of *d* at (normalised) index *e*, in place.

    From this code: function(d,e){e=(e%d.length+d.length)%d.length;d.splice(e,1)}
    """
    js_splice(d, throttling_mod_func(d, e), 1)
|
||||
|
||||
|
||||
class CalculateN(object):
    """Unscrambler for the throttled 'n' query parameter of stream URLs.

    The player's 'base.js' contains a function that transforms the 'n'
    parameter; this class locates that function in the JS source, maps
    its primitive array operations to the Python implementations above,
    and replays them to compute the corrected 'n' value.
    """

    # References:
    # https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
    # https://github.com/pytube/pytube/blob/fc9aec5c35829f2ebb4ef8dd599b14a666850d20/pytube/cipher.py

    # To maintainers: it might be necessary to add more function patterns (and implementations)
    # in the future as the 'base.js' player code gets changed and updated.
    # Each entry maps a regex identifying a JS function body to its
    # Python re-implementation. Ordering matters: "case\s65" must be
    # tested before the generic "case\s\d+" so the variant cipher
    # alphabet is detected first.
    MAPPING_FUNC_PATTERNS = (
        (r"{for\(\w=\(\w%\w\.length\+\w\.length\)%\w\.length;\w--;\)\w\.unshift\(\w.pop\(\)\)}", throttling_unshift), # noqa:E501
        (r"{\w\.reverse\(\)}", throttling_reverse),
        (r"{\w\.push\(\w\)}", throttling_push),
        (r";var\s\w=\w\[0\];\w\[0\]=\w\[\w\];\w\[\w\]=\w}", throttling_swap),
        (r"case\s65", throttling_cipher_function_b),
        (r"case\s\d+", throttling_cipher_function_a),
        (r"\w\.splice\(0,1,\w\.splice\(\w,1,\w\[0\]\)\[0\]\)", throttling_nested_splice), # noqa:E501
        (r";\w\.splice\(\w,1\)}", throttling_splice),
        (r"\w\.splice\(-\w\)\.reverse\(\)\.forEach\(function\(\w\){\w\.unshift\(\w\)}\)", throttling_prepend), # noqa:E501
        (r"for\(var \w=\w\.length;\w;\)\w\.push\(\w\.splice\(--\w,1\)\[0\]\)}", throttling_reverse), # noqa:E501
    )

    def __init__(self, js):
        """Parse *js* (the 'base.js' source) and prepare for calculation.

        :param str js:
            The contents of the 'base.js' asset file.
        """
        # Memoised result of calculate_n(); reused across calls.
        self.calculated_n = None
        # Raw (single-line) JS source of the throttling function, or
        # None when it could not be located.
        self.throttling_function_code = self.get_throttling_function_code(js)

    @staticmethod
    def get_throttling_function_code(js):
        """Extract the raw code for the throttling function.

        :param str js:
            The contents of the 'base.js' asset file.
        :rtype: str
        :returns:
            The JS code of the function as a string, with linebreaks removed.
        """
        # This pattern is only present in the throttling function code.
        fiduciary_index = js.find('enhanced_except_')
        if fiduciary_index == -1:
            logging.debug('fiduciary_index not found')
            return None

        # The function opens at the last "=function(" before the marker.
        start_index = js.rfind('=function(', 0, fiduciary_index)
        if start_index == -1:
            logging.debug('function code start not found')
            return None

        # ...and closes at the first "};" after the marker.
        end_index = js.find('};', fiduciary_index)
        if end_index == -1:
            logging.debug('function code end not found')
            return None

        return js[start_index:end_index].replace('\n', '')

    @staticmethod
    def get_throttling_plan_gen(raw_code):
        """Extracts the 'throttling plan' and generates its commands.

        The "plan" is a list of indices into the 'c' array that grab functions
        and values used with those functions, used to unscramble the 'n' value.
        :param str raw_code:
            The response from get_throttling_function_code(js).
        :returns:
            An iterable of "command" tuples, where the first element of each
            tuple is the (stringified) index of a function in the 'c' array
            and the remaining elements are indices of the arguments to be
            sent to the call.
        :raises Exception:
            When the try-block containing the plan cannot be located.
            Caught by calculate_n(), which aborts the calculation.
        """
        # So far, the commands used to calculate 'n' are function calls of
        # either the form c[x](c[y]) or c[x](c[y],c[z]), and come in a
        # try/catch block like this:
        # "try{c[0](c[44],c[37]),c[20](c[12],c[27]),c[0](c[12],c[2]), ...}".
        plan_start_pattern = 'try{'
        plan_start_index = raw_code.find(plan_start_pattern)
        if plan_start_index == -1:
            logging.debug('command block start not found')
            raise Exception()
        else:
            # Skip the whole start pattern, it's not needed.
            plan_start_index += len(plan_start_pattern)

        plan_end_index = raw_code.find('}', plan_start_index)
        if plan_end_index == -1:
            logging.debug('command block end not found')
            raise Exception()

        plan_code = raw_code[plan_start_index:plan_end_index]

        # Each command will be split from "c[x](c[y],c[z],...)" into
        # ('x', 'y', 'z', ...), that is, a sequence of stringified ints.
        #
        # So far, either one or two parameters are used:
        # "c[x](c[y])" -> ('x', 'y')
        # "c[x](c[y],c[z])" -> ('x', 'y', 'z')
        for command in (plan_code.strip('c)').replace('[', '')
                        .replace(']', '').replace('(', ',')
                        .replace('c', '').split('),')):
            yield command.split(',')

    @staticmethod
    def array_reverse_split_gen(array_code):
        """Iterates the comma-split pieces of the stringified list in reverse,
        joining pieces that are part of the same longer object that might
        have comma characters inside.

        :param str array_code:
            The 'c' array string, without enclosing brackets.
        :returns:
            Generates the elements of the stringified array in REVERSE order.
            The caller is responsible for reversing it back to normal.
        """
        # Accumulates trailing pieces of a multi-comma element (e.g. a
        # JS function body) until its opening piece is reached.
        accumulator = None
        for piece in reversed(array_code.split(',')):
            if piece.startswith('function') or piece[0] == '"' or piece[0] == "'":
                # When the piece starts with "function" or a quote char, yield
                # what has been accumulated so far, if anything.
                if accumulator:
                    yield piece + ',' + accumulator
                    accumulator = None
                else:
                    yield piece
            elif piece.endswith('}') or piece[-1] == '"' or piece[-1] == "'":
                # When the piece ends with a curly bracket or quote char but
                # didn't start with "function" or a quote char, start
                # accumulating with the next pieces until it's closed.
                accumulator = piece
            else:
                if accumulator:
                    accumulator = piece + ',' + accumulator
                else:
                    yield piece

    @classmethod
    def get_throttling_function_array(cls, mutable_n_list, raw_code):
        """Extract the 'c' array that comes with values and functions
        used to unscramble the initial 'n' value.

        :param list mutable_n_list:
            Mutable list with the characters of the 'initial n' value.
        :param str raw_code:
            The response from get_throttling_function_code(js).
        :returns:
            The array of various integers, arrays, and functions.
        :raises Exception:
            When the 'c' array cannot be located in *raw_code*.
            Caught by calculate_n(), which aborts the calculation.
        """

        array_start_pattern = ",c=["
        array_start_index = raw_code.find(array_start_pattern)
        if array_start_index == -1:
            logging.debug('"c" array pattern not found')
            raise Exception()
        else:
            array_start_index += len(array_start_pattern)

        array_end_index = raw_code.rfind('];')
        if array_end_index == -1:
            logging.debug('"c" array end not found')
            raise Exception()

        array_code = raw_code[array_start_index:array_end_index]

        # Built in reverse order (see array_reverse_split_gen), then
        # reversed in place at the end.
        converted_array = []
        for el in cls.array_reverse_split_gen(array_code):
            try:
                converted_array.append(int(el))
                continue
            except ValueError:
                # Not an integer value.
                pass

            if el == 'null':
                # Replace null elements in this array with references to itself.
                converted_array.append(converted_array)
                continue

            if el[0] == '"' or el[0] == "'":
                # Strip quotation marks in string elements.
                converted_array.append(el.strip('\'"'))
                continue

            if el.startswith('function'):
                found = False
                for pattern, fn in cls.MAPPING_FUNC_PATTERNS:
                    if re.search(pattern, el):
                        converted_array.append(fn)
                        found = True
                        break
                else:
                    # for/else: no pattern matched. The unknown function
                    # falls through to the mutable_n_list append below.
                    logging.debug('unknown mapping function: %s', el)
                if found:
                    continue

            # Probably the single 'b' references (references to the list with
            # initial 'n' characters).
            converted_array.append(mutable_n_list)

        # Reverse in-place (instead of using a [::-1] slice), important as
        # there are references to this array within itself.
        converted_array.reverse()
        return converted_array

    def calculate_n(self, mutable_n_list):
        """Converts n to the correct value to prevent throttling.

        :param list mutable_n_list:
            A list with the characters of the initial 'n' string. This list
            will be modified by this function.
        :returns:
            The new value of 'n' as a string, to replace the value in the
            video stream URL, or None when it could not be calculated.
        """
        if self.calculated_n:
            logging.debug('Reusing calculated "n": %s', self.calculated_n)
            return self.calculated_n

        if not self.throttling_function_code:
            return None

        logging.debug('Attempting to calculate "n" from initial: %s',
                      ''.join(mutable_n_list))

        # For each step in the plan, get the first item of the step as the
        # index of the function to call, and then call that function using
        # the throttling array elements indexed by the remaining step items.
        try:
            throttling_array = self.get_throttling_function_array(
                mutable_n_list,
                self.throttling_function_code
            )
            for step in self.get_throttling_plan_gen(self.throttling_function_code):
                curr_func = throttling_array[int(step[0])]
                if not callable(curr_func):
                    logging.debug('%s is not callable', curr_func)
                    # NOTE(review): tuple-style message appears to rely on
                    # the kodion logging wrapper — confirm format handling.
                    logging.debug(('Throttling array:', '%r'), throttling_array)
                    return None

                first_arg = throttling_array[int(step[1])]

                if len(step) == 2:
                    curr_func(first_arg)
                elif len(step) == 3:
                    second_arg = throttling_array[int(step[2])]
                    curr_func(first_arg, second_arg)
        except Exception:
            logging.exception('Error calculating "n"')
            return None

        # The plan's functions mutated mutable_n_list in place; the
        # joined characters are the corrected 'n'.
        self.calculated_n = ''.join(mutable_n_list)
        logging.debug('Calculated "n": %s', self.calculated_n)
        return self.calculated_n
|
||||
@@ -0,0 +1,670 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from itertools import chain
|
||||
|
||||
from .utils import get_thumbnail
|
||||
from ...kodion import logging
|
||||
from ...kodion.constants import CHANNEL_ID, FANART_TYPE, INCOGNITO
|
||||
|
||||
|
||||
class ResourceManager(object):
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
    def __init__(self, provider, context, client, progress_dialog=None):
        """Set up the resource manager for one provider/context/client.

        :param provider: owning plugin provider.
        :param context: kodion context; supplies params, settings, caches.
        :param client: API client used for all remote fetches.
        :param progress_dialog: optional dialog advanced as data loads.
        """
        self._provider = provider
        self._context = context
        self._client = client
        self._progress_dialog = progress_dialog

        # Newly fetched items, shared with the data cache as a memory
        # store and written back via cache_data().
        self.new_data = {}

        params = context.get_params()
        self._incognito = params.get(INCOGNITO)

        # Fanart preference: the request param overrides the setting.
        fanart_type = params.get(FANART_TYPE)
        settings = context.get_settings()
        if fanart_type is None:
            fanart_type = settings.fanart_selection()
        # True when channel banner images should be used as fanart.
        self._channel_fanart = fanart_type == settings.FANART_CHANNEL
        self._thumb_size = settings.get_thumbnail_size()
|
||||
|
||||
def context_changed(self, context, client):
|
||||
return self._context != context or self._client != client
|
||||
|
||||
def update_progress_dialog(self, progress_dialog):
|
||||
old_progress_dialog = self._progress_dialog
|
||||
if not progress_dialog or old_progress_dialog == progress_dialog:
|
||||
return
|
||||
if old_progress_dialog:
|
||||
old_progress_dialog.close()
|
||||
self._progress_dialog = progress_dialog
|
||||
|
||||
def _list_batch(self, input_list, n=50):
|
||||
if not isinstance(input_list, (list, tuple)):
|
||||
input_list = list(input_list)
|
||||
num_items = len(input_list)
|
||||
for i in range(0, num_items, n):
|
||||
yield input_list[i:i + n]
|
||||
if self._progress_dialog:
|
||||
self._progress_dialog.update(steps=min(n, num_items))
|
||||
|
||||
    def get_channels(self, ids, suppress_errors=False, defer_cache=False):
        """Return channel data for *ids*, using the data cache when fresh.

        Handles ('mine' or '@handle') identifiers are first resolved to
        channel IDs. Missing/stale/partial entries are fetched from the
        API in batches of 50 and cached.

        :param ids: iterable of channel IDs, '@handles' or 'mine'.
        :param bool suppress_errors: when True, API errors neither
            notify nor raise.
        :param bool defer_cache: when True, new data is staged for a
            later cache write instead of being written immediately.
        :returns: dict keyed by the originally requested identifier
            (handles are mapped back), in request order.
        """
        context = self._context
        client = self._client
        data_cache = context.get_data_cache()
        function_cache = context.get_function_cache()

        refresh = context.refresh_requested()
        # With no internet connection, force use of cached data and
        # ignore any refresh request.
        forced_cache = not function_cache.run(
            client.internet_available,
            function_cache.ONE_MINUTE * 5,
            _refresh=refresh,
        )
        refresh = not forced_cache and refresh

        # Resolve 'mine' and '@handle' identifiers to real channel IDs,
        # remembering the mapping so results can be keyed by the
        # original identifier again.
        updated = []
        handles = {}
        for identifier in ids:
            if not identifier:
                continue

            if identifier != 'mine' and not identifier.startswith('@'):
                updated.append(identifier)
                continue

            channel_id = function_cache.run(
                client.get_channel_by_identifier,
                function_cache.ONE_MONTH,
                _refresh=refresh,
                identifier=identifier,
            )
            if channel_id:
                updated.append(channel_id)
                if channel_id != identifier:
                    handles[channel_id] = identifier

        ids = updated
        if refresh or not ids:
            result = {}
        else:
            result = data_cache.get_items(
                ids,
                None if forced_cache else data_cache.ONE_DAY,
                memory_store=self.new_data,
            )
        # IDs that are absent, empty or only partially cached.
        to_update = [id_ for id_ in ids
                     if id_
                     and (id_ not in result
                          or not result[id_]
                          or result[id_].get('_partial'))]

        if result:
            self.log.debugging and self.log.debug(
                ('Using cached data for {num} channel(s)',
                 'Channel IDs: {ids}'),
                num=len(result),
                ids=list(result),
            )
            if self._progress_dialog:
                self._progress_dialog.update(steps=len(result) - len(to_update))

        if to_update:
            notify_and_raise = not suppress_errors
            # API limit: at most 50 channel IDs per request.
            new_data = [client.get_channels(list_of_50,
                                            max_results=50,
                                            notify=notify_and_raise,
                                            raise_exc=notify_and_raise)
                        for list_of_50 in self._list_batch(to_update, n=50)]
            if any(new_data):
                new_data = {
                    yt_item['id']: yt_item
                    for batch in new_data
                    for yt_item in batch.get('items', [])
                    if yt_item
                }
            else:
                new_data = None
        else:
            new_data = None

        if new_data:
            self.log.debugging and self.log.debug(
                ('Retrieved new data for {num} channel(s)',
                 'Channel IDs: {ids}'),
                num=len(to_update),
                ids=to_update,
            )
            result.update(new_data)
            self.cache_data(new_data, defer=defer_cache)

        # Re-sort result to match order of requested IDs
        # Will only work in Python v3.7+
        if handles or list(result) != ids[:len(result)]:
            result = {
                handles.get(id_, id_): result[id_]
                for id_ in ids
                if id_ in result
            }

        return result
|
||||
|
||||
    def get_channel_info(self,
                         ids,
                         channel_data=None,
                         suppress_errors=False,
                         defer_cache=False):
        """Return display info ({'name', 'image', 'fanart'}) per channel.

        Like get_channels(), but transforms each raw channel resource
        into a small dict suitable for listitem decoration.

        :param ids: iterable of channel IDs.
        :param dict channel_data: optional pre-fetched data used as the
            starting result set (ignored when a refresh is requested).
        :param bool suppress_errors: when True, API errors neither
            notify nor raise.
        :param bool defer_cache: when True, new data is staged for a
            later cache write instead of being written immediately.
        :returns: dict of channel ID -> {'name', 'image', 'fanart'}.
        """
        context = self._context
        client = self._client
        function_cache = context.get_function_cache()

        refresh = context.refresh_requested()
        # With no internet connection, force use of cached data and
        # ignore any refresh request.
        forced_cache = not function_cache.run(
            client.internet_available,
            function_cache.ONE_MINUTE * 5,
            _refresh=refresh,
        )
        refresh = not forced_cache and refresh

        if not refresh and channel_data:
            result = channel_data
        else:
            result = {}

        # Fill gaps in the supplied data from the data cache first.
        to_check = [id_ for id_ in ids
                    if id_
                    and (id_ not in result
                         or not result[id_]
                         or result[id_].get('_partial'))]
        if to_check:
            data_cache = context.get_data_cache()
            result.update(data_cache.get_items(
                to_check,
                None if forced_cache else data_cache.ONE_MONTH,
                memory_store=self.new_data,
            ))
        # Whatever is still absent/empty/partial must be fetched.
        to_update = [id_ for id_ in ids
                     if id_
                     and (id_ not in result
                          or not result[id_]
                          or result[id_].get('_partial'))]

        if result:
            self.log.debugging and self.log.debug(
                ('Using cached data for {num} channel(s)',
                 'Channel IDs: {ids}'),
                num=len(result),
                ids=list(result),
            )
            if self._progress_dialog:
                self._progress_dialog.update(steps=len(result) - len(to_update))

        if to_update:
            notify_and_raise = not suppress_errors
            # API limit: at most 50 channel IDs per request.
            new_data = [client.get_channels(list_of_50,
                                            max_results=50,
                                            notify=notify_and_raise,
                                            raise_exc=notify_and_raise)
                        for list_of_50 in self._list_batch(to_update, n=50)]
            if any(new_data):
                new_data = {
                    yt_item['id']: yt_item
                    for batch in new_data
                    for yt_item in batch.get('items', [])
                    if yt_item
                }
            else:
                new_data = None
        else:
            new_data = None

        if new_data:
            self.log.debugging and self.log.debug(
                ('Retrieved new data for {num} channel(s)',
                 'Channel IDs: {ids}'),
                num=len(to_update),
                ids=to_update,
            )
            result.update(new_data)
            self.cache_data(new_data, defer=defer_cache)

        # Banner keys in order of preference for fanart selection.
        banners = (
            'bannerTvMediumImageUrl',
            'bannerTvLowImageUrl',
            'bannerTvImageUrl',
            'bannerExternalUrl',
        )
        untitled = context.localize('untitled')
        thumb_size = self._thumb_size
        channel_fanart = self._channel_fanart

        # transform: replace each raw channel resource with the small
        # display-info dict, in place.
        for key, item in result.items():
            channel_info = {
                'name': None,
                'image': None,
                'fanart': None,
            }

            if channel_fanart:
                images = item.get('brandingSettings', {}).get('image', {})
                for banner in banners:
                    image = images.get(banner)
                    if image:
                        channel_info['fanart'] = image
                        break

            snippet = item.get('snippet')
            if snippet:
                # Prefer the localised title over the default title.
                localised_info = snippet.get('localized') or {}
                channel_info['name'] = (localised_info.get('title')
                                        or snippet.get('title')
                                        or untitled)
                channel_info['image'] = get_thumbnail(thumb_size,
                                                      snippet.get('thumbnails'))
            result[key] = channel_info

        return result
|
||||
|
||||
    def get_playlists(self, ids, suppress_errors=False, defer_cache=False):
        """Return playlist data for *ids*, using the data cache when fresh.

        Missing/stale/partial entries are fetched from the API in
        batches of 50 and cached.

        :param ids: iterable of playlist IDs.
        :param bool suppress_errors: when True, API errors neither
            notify nor raise.
        :param bool defer_cache: when True, new data is staged for a
            later cache write instead of being written immediately.
        :returns: dict of playlist ID -> playlist resource, in request
            order.
        """
        ids = tuple(ids)

        context = self._context
        client = self._client
        function_cache = context.get_function_cache()

        refresh = context.refresh_requested()
        # With no internet connection, force use of cached data and
        # ignore any refresh request.
        forced_cache = not function_cache.run(
            client.internet_available,
            function_cache.ONE_MINUTE * 5,
            _refresh=refresh,
        )
        refresh = not forced_cache and refresh

        if refresh or not ids:
            result = {}
        else:
            data_cache = context.get_data_cache()
            result = data_cache.get_items(
                ids,
                None if forced_cache else data_cache.ONE_DAY,
                memory_store=self.new_data,
            )
        # IDs that are absent, empty or only partially cached.
        to_update = [id_ for id_ in ids
                     if id_
                     and (id_ not in result
                          or not result[id_]
                          or result[id_].get('_partial'))]

        if result:
            self.log.debugging and self.log.debug(
                ('Using cached data for {num} playlist(s)',
                 'Playlist IDs: {ids}'),
                num=len(result),
                ids=list(result),
            )
            if self._progress_dialog:
                self._progress_dialog.update(steps=len(result) - len(to_update))

        if to_update:
            notify_and_raise = not suppress_errors
            # API limit: at most 50 playlist IDs per request.
            new_data = [client.get_playlists(list_of_50,
                                             max_results=50,
                                             notify=notify_and_raise,
                                             raise_exc=notify_and_raise)
                        for list_of_50 in self._list_batch(to_update, n=50)]
            if any(new_data):
                new_data = {
                    yt_item['id']: yt_item
                    for batch in new_data
                    for yt_item in batch.get('items', [])
                    if yt_item
                }
            else:
                new_data = None
        else:
            new_data = None

        if new_data:
            self.log.debugging and self.log.debug(
                ('Retrieved new data for {num} playlist(s)',
                 'Playlist IDs: {ids}'),
                num=len(to_update),
                ids=to_update,
            )
            result.update(new_data)
            self.cache_data(new_data, defer=defer_cache)

        # Re-sort result to match order of requested IDs
        # Will only work in Python v3.7+
        # NOTE(review): 'ids' is a tuple here, so comparing it against
        # list(result) is always unequal and the re-sort always runs.
        # Harmless (output is identical), but worth confirming intent.
        if list(result) != ids[:len(result)]:
            result = {
                id_: result[id_]
                for id_ in ids
                if id_ in result
            }

        return result
|
||||
|
||||
    def get_playlist_items(self,
                           ids=None,
                           batch_id=None,
                           page_token=None,
                           defer_cache=False,
                           flatten=False,
                           **kwargs):
        """Return playlist item pages for the given playlists.

        Pages are cached per (playlist_id, page_token) "batch ID". When
        following pages (fetch_next), each playlist is walked page by
        page via 'nextPageToken' until exhausted.

        :param ids: iterable of playlist IDs.
        :param tuple batch_id: optional (playlist_id, page_token) to
            fetch a single specific page; overrides *ids*/*page_token*.
        :param page_token: page to start from; None walks all pages.
        :param bool defer_cache: when True, new data is staged for a
            later cache write instead of being written immediately.
        :param bool flatten: when True (and paging), merge all pages'
            'items' into the final page's response.
        :param kwargs: passed through to client.get_playlist_items().
        :returns: a single page dict (when not paging), a flattened page
            dict, or a dict of batch ID -> page dict; None when neither
            *ids* nor *batch_id* was provided.
        """
        if not ids and not batch_id:
            return None

        context = self._context
        client = self._client
        function_cache = context.get_function_cache()

        refresh = context.refresh_requested()
        # Force cached data when offline, or when asking for 'mine'
        # while not logged in.
        forced_cache = (
            not function_cache.run(
                client.internet_available,
                function_cache.ONE_MINUTE * 5,
                _refresh=refresh,
            )
            or (context.get_param(CHANNEL_ID) == 'mine'
                and not client.logged_in)
        )
        refresh = not forced_cache and refresh

        # Decide whether to follow 'nextPageToken' chains (fetch_next).
        if batch_id:
            ids = [batch_id[0]]
            page_token = batch_id[1] or page_token
            fetch_next = False
        elif page_token is None:
            fetch_next = True
        elif len(ids) == 1:
            fetch_next = False
        else:
            # A page token only makes sense for a single playlist.
            page_token = None
            fetch_next = True

        data_cache = context.get_data_cache()
        batch_ids = []
        to_update = []
        result = {}
        for playlist_id in ids:
            # 0 marks the first page (no token).
            page_token = page_token or 0
            while 1:
                batch_id = (playlist_id, page_token)
                batch_ids.append(batch_id)
                if refresh:
                    batch = None
                else:
                    batch = data_cache.get_item(
                        '{0},{1}'.format(*batch_id),
                        as_dict=True,
                    )
                if not batch:
                    to_update.append(batch_id)
                    break
                age = batch.get('age')
                batch = batch.get('value')
                if forced_cache:
                    # Offline: accept cached pages regardless of age.
                    result[batch_id] = batch
                elif page_token:
                    # Later pages may be up to a day old.
                    if age <= data_cache.ONE_DAY:
                        result[batch_id] = batch
                    else:
                        to_update.append(batch_id)
                        break
                else:
                    # The first page must be fresh (5 minutes); note we
                    # keep walking even when it is queued for update.
                    if age <= data_cache.ONE_MINUTE * 5:
                        result[batch_id] = batch
                    else:
                        to_update.append(batch_id)
                page_token = batch.get('nextPageToken') if fetch_next else None
                if not page_token:
                    break

        if result:
            self.log.debugging and self.log.debug(
                ('Using cached data for {num} playlist part(s)',
                 'Batch IDs: {ids}'),
                num=len(result),
                ids=list(result),
            )
            if self._progress_dialog:
                self._progress_dialog.update(steps=len(result) - len(to_update))

        # Fetch stale/missing pages, following each page chain from the
        # point of failure and splicing newly discovered batch IDs back
        # into the ordered batch_ids list.
        new_data = {}
        insert_point = 0
        for playlist_id, page_token in to_update:
            new_batch_ids = []
            batch_id = (playlist_id, page_token)
            insert_point = batch_ids.index(batch_id, insert_point)
            while 1:
                batch_id = (playlist_id, page_token)
                if batch_id in result:
                    break
                batch = client.get_playlist_items(*batch_id, **kwargs)
                if not batch:
                    break
                new_batch_ids.append(batch_id)
                new_data[batch_id] = batch
                page_token = batch.get('nextPageToken') if fetch_next else None
                if not page_token:
                    break

            if new_batch_ids:
                batch_ids[insert_point:insert_point] = new_batch_ids
                insert_point += len(new_batch_ids)

        if new_data:
            self.log.debugging and self.log.debug(
                ('Retrieved new data for {num} playlist part(s)',
                 'Batch IDs: {ids}'),
                num=len(new_data),
                ids=list(new_data),
            )
            result.update(new_data)
            self.cache_data({
                '{0},{1}'.format(*batch_id): batch
                for batch_id, batch in new_data.items()
            }, defer=defer_cache)

        # Re-sort result to match order of requested IDs
        # Will only work in Python v3.7+
        if list(result) != batch_ids[:len(result)]:
            result = {
                batch_id: result[batch_id]
                for batch_id in batch_ids
                if batch_id in result
            }

        if not fetch_next:
            # Single-page request: unwrap the one (and only) page.
            return result[batch_ids[0]]
        if flatten:
            # Merge every page's items into the last page's response.
            items = chain.from_iterable(
                batch.get('items', [])
                for batch in result.values()
            )
            result = result[batch_ids[-1]]
            result['items'] = list(items)
            return result
        return result
|
||||
|
||||
def get_related_playlists(self, channel_id, defer_cache=False):
|
||||
result = self.get_channels((channel_id,), defer_cache=defer_cache)
|
||||
|
||||
# transform
|
||||
item = None
|
||||
if channel_id != 'mine':
|
||||
item = result.get(channel_id, {})
|
||||
else:
|
||||
for item in result.values():
|
||||
if item:
|
||||
break
|
||||
|
||||
if item is None:
|
||||
return None
|
||||
return item.get('contentDetails', {}).get('relatedPlaylists')
|
||||
|
||||
def get_my_playlists(self, channel_id, page_token, defer_cache=False):
|
||||
result = self._client.get_playlists_of_channel(channel_id, page_token)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
new_data = {
|
||||
yt_item['id']: yt_item
|
||||
for yt_item in result.get('items', [])
|
||||
if yt_item
|
||||
}
|
||||
if new_data:
|
||||
self.log.debugging and self.log.debug(
|
||||
('Retrieved new data for {num} playlist(s)',
|
||||
'Playlist IDs: {ids}'),
|
||||
num=len(new_data),
|
||||
ids=list(new_data),
|
||||
)
|
||||
self.cache_data(new_data, defer=defer_cache)
|
||||
|
||||
return result
|
||||
|
||||
def get_videos(self,
               ids,
               live_details=False,
               suppress_errors=False,
               defer_cache=False,
               yt_items_dict=None):
    """Return video data for the given video IDs, keyed by ID.

    Cached entries are used when fresh enough (or when the network is
    unavailable); missing, partial, or previously-unavailable entries
    are re-fetched from the API in batches of 50. When local history is
    enabled, playback data is attached under each item's 'play_data'.

    :param ids: iterable of video IDs to look up.
    :param live_details: request live stream details from the client.
    :param suppress_errors: when True, API errors are neither notified
        nor raised.
    :param defer_cache: when True, queue new data in self.new_data
        instead of writing the data cache immediately.
    :param yt_items_dict: optional pre-fetched data keyed by video ID,
        used as a fallback and to force refresh of unavailable entries.
    :return: dict mapping video ID to its data, in requested ID order.
    """
    ids = tuple(ids)

    context = self._context
    client = self._client
    function_cache = context.get_function_cache()

    refresh = context.refresh_requested()
    # When the internet check fails, fall back to cached data regardless
    # of age, and ignore any refresh request.
    forced_cache = not function_cache.run(
        client.internet_available,
        function_cache.ONE_MINUTE * 5,
        _refresh=refresh,
    )
    refresh = not forced_cache and refresh

    if refresh or not ids:
        result = {}
    else:
        data_cache = context.get_data_cache()
        result = data_cache.get_items(
            ids,
            None if forced_cache else data_cache.ONE_MONTH,
            memory_store=self.new_data,
        )
    # IDs that still need an API fetch: absent, empty, partial, or
    # previously marked unavailable but now present in yt_items_dict.
    to_update = [id_ for id_ in ids
                 if id_
                 and (id_ not in result
                      or not result[id_]
                      or result[id_].get('_partial')
                      or (yt_items_dict
                          and yt_items_dict.get(id_)
                          and result[id_].get('_unavailable')))]

    if result:
        self.log.debugging and self.log.debug(
            ('Using cached data for {num} video(s)',
             'Video IDs: {ids}'),
            num=len(result),
            ids=list(result),
        )
    if self._progress_dialog:
        self._progress_dialog.update(steps=len(result) - len(to_update))

    if to_update:
        notify_and_raise = not suppress_errors
        new_data = [client.get_videos(list_of_50,
                                      live_details,
                                      max_results=50,
                                      notify=notify_and_raise,
                                      raise_exc=notify_and_raise)
                    for list_of_50 in self._list_batch(to_update, n=50)]
        if any(new_data):
            new_data = {
                yt_item['id']: yt_item
                for batch in new_data
                for yt_item in batch.get('items', [])
                if yt_item
            }
        else:
            new_data = None
    else:
        new_data = None

    if new_data:
        self.log.debugging and self.log.debug(
            ('Retrieved new data for {num} video(s)',
             'Video IDs: {ids}'),
            num=len(to_update),
            ids=to_update,
        )
        # Mark requested-but-unreturned IDs as unavailable. Build a new
        # placeholder dict per ID: dict.fromkeys would share a single
        # mutable dict between all IDs, so a later in-place update (e.g.
        # attaching 'play_data' below) would leak into every entry.
        placeholders = {id_: {'_unavailable': True} for id_ in to_update}
        placeholders.update(new_data)
        new_data = placeholders
        result.update(new_data)
        self.cache_data(new_data, defer=defer_cache)

    if not result and not new_data and yt_items_dict:
        result = yt_items_dict
        self.cache_data(result, defer=defer_cache)

    # Re-sort result to match order of requested IDs
    # Will only work in Python v3.7+ (insertion-ordered dicts).
    # Compare like types: ids is a tuple, and a list never equals a
    # tuple, so without the conversion this guard was always True and
    # the rebuild ran unconditionally.
    if list(result) != list(ids[:len(result)]):
        result = {
            id_: result[id_]
            for id_ in ids
            if id_ in result
        }

    if context.get_settings().use_local_history():
        playback_history = context.get_playback_history()
        played_items = playback_history.get_items(ids)
        for video_id, play_data in played_items.items():
            if video_id in result:
                result[video_id]['play_data'] = play_data

    return result
|
||||
|
||||
def cache_data(self, data=None, defer=False):
    """Write item data to the data cache, or queue it when defer is True.

    Deferred data accumulates in self.new_data and is flushed on the
    next non-deferred call. In incognito mode nothing is persisted.
    """
    if defer:
        # Accumulate for a later non-deferred call.
        if data:
            self.new_data.update(data)
        return

    pending = self.new_data
    if pending:
        # Merge any queued data with the current payload and flush both.
        if data:
            pending.update(data)
        data = pending
        flush = True
    else:
        flush = False

    if not data:
        return

    if self._incognito:
        if self.log.debugging:
            self.log.debug(
                ('Incognito mode active - discarded data for {num} item(s)',
                 'IDs: {ids}'),
                num=len(data),
                ids=list(data),
            )
    else:
        if self.log.debugging:
            self.log.debug(
                ('Storing new data to cache for {num} item(s)',
                 'IDs: {ids}'),
                num=len(data),
                ids=list(data),
            )
        self._context.get_data_cache().set_items(data)
    if flush:
        self.new_data = {}
|
||||
@@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .cipher import Cipher
|
||||
|
||||
|
||||
__all__ = ('Cipher',)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,176 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .json_script_engine import JsonScriptEngine
|
||||
|
||||
|
||||
class Cipher(object):
    """Extracts YouTube's signature-decipher routine from player JS.

    The player JavaScript is parsed (with regexes, not a real JS
    parser) into a small "json script" of known transform actions
    (list/slice/splice/swap/reverse/join) that JsonScriptEngine can
    replay against a signature string.
    """

    def __init__(self, context, javascript):
        # Plugin context providing the function cache.
        self._context = context
        # Raw player JavaScript source to parse.
        self._javascript = javascript

        # Parsed helper-object functions, keyed by object then function
        # name, so each object body is only parsed once.
        self._object_cache = {}

    def get_signature(self, signature):
        """Decipher *signature* using the (cached) parsed JS routine.

        Returns the transformed signature, or '' when the routine could
        not be extracted.
        """
        function_cache = self._context.get_function_cache()
        # Parsing the JS is expensive; cache the resulting json script
        # for a day (player JS changes infrequently).
        json_script = function_cache.run(self._load_javascript,
                                         function_cache.ONE_DAY,
                                         javascript=self._javascript)

        if json_script:
            json_script_engine = JsonScriptEngine(json_script)
            return json_script_engine.execute(signature)

        return ''

    def _load_javascript(self, javascript):
        """Parse the player JS into a json script of transform actions.

        Raises Exception when the signature function cannot be located.
        """
        function_name = self._find_signature_function_name(javascript)
        if not function_name:
            raise Exception('Signature function not found')

        _function = self._find_function_body(function_name, javascript)
        function_parameter = _function[0].replace('\n', '').split(',')
        function_body = _function[1].replace('\n', '').split(';')

        json_script = {'actions': []}
        # Each statement of the signature function is matched against
        # the known patterns; order of appended actions mirrors the JS.
        for line in function_body:
            # list of characters
            split_match = re.match(r'%s\s?=\s?%s.split\(""\)' % (function_parameter[0], function_parameter[0]), line)
            if split_match:
                json_script['actions'].append({'func': 'list',
                                               'params': ['%SIG%']})

            # return
            return_match = re.match(r'return\s+%s.join\(""\)' % function_parameter[0], line)
            if return_match:
                json_script['actions'].append({'func': 'join',
                                               'params': ['%SIG%']})

            # real object functions
            cipher_match = re.match(
                r'(?P<object_name>[$a-zA-Z0-9]+)\.?\[?"?(?P<function_name>[$a-zA-Z0-9]+)"?\]?\((?P<parameter>[^)]+)\)',
                line)
            if cipher_match:
                object_name = cipher_match.group('object_name')
                function_name = cipher_match.group('function_name')
                parameter = cipher_match.group('parameter').split(',')
                # First argument is always the signature array; any
                # further arguments are integer operands.
                for i in range(len(parameter)):
                    param = parameter[i].strip()
                    param = '%SIG%' if i == 0 else int(param)
                    parameter[i] = param

                # get function from object
                _function = self._get_object_function(object_name, function_name, javascript)

                # try to find known functions and convert them to our json_script
                # Classification inspects only the first statement of
                # the helper's body.
                slice_match = re.match(r'[a-zA-Z]+.slice\((?P<a>\d+),[a-zA-Z]+\)', _function['body'][0])
                if slice_match:
                    a = int(slice_match.group('a'))
                    # NOTE(review): 3 params are emitted here although
                    # JsonScriptEngine._slice only consumes 2 — confirm
                    # the engine tolerates the extra operand.
                    params = ['%SIG%', a, parameter[1]]
                    json_script['actions'].append({'func': 'slice',
                                                   'params': params})

                splice_match = re.match(r'[a-zA-Z]+.splice\((?P<a>\d+),[a-zA-Z]+\)', _function['body'][0])
                if splice_match:
                    a = int(splice_match.group('a'))
                    params = ['%SIG%', a, parameter[1]]
                    json_script['actions'].append({'func': 'splice',
                                                   'params': params})

                swap_match = re.match(r'var\s?[a-zA-Z]+=\s?[a-zA-Z]+\[0\]', _function['body'][0])
                if swap_match:
                    params = ['%SIG%', parameter[1]]
                    json_script['actions'].append({'func': 'swap',
                                                   'params': params})

                reverse_match = re.match(r'[a-zA-Z].reverse\(\)', _function['body'][0])
                if reverse_match:
                    params = ['%SIG%']
                    json_script['actions'].append({'func': 'reverse',
                                                   'params': params})

        return json_script

    @staticmethod
    def _find_signature_function_name(javascript):
        """Return the (re-escaped) name of the JS signature function.

        Returns '' when no known pattern matches.
        """
        # match_patterns source is from youtube-dl
        # https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/youtube.py#L1553
        # LICENSE: The Unlicense

        match_patterns = (
            r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
            r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
            r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
            r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
            # Obsolete patterns
            r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
            r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
            r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('
        )

        for pattern in match_patterns:
            match = re.search(pattern, javascript)
            if match:
                # Escaped so the name can be interpolated into further
                # regex patterns (names may contain '$').
                return re.escape(match.group('sig'))

        return ''

    @staticmethod
    def _find_function_body(function_name, javascript):
        """Return (parameter, body) of the named JS function, or ('', '')."""
        # normalize function name
        function_name = function_name.replace('$', '\\$')
        pattern = r'%s=function\((?P<parameter>\w)\){(?P<body>[a-z=\.\("\)]*;(.*);(?:.+))}' % function_name
        match = re.search(pattern, javascript)
        if match:
            return match.group('parameter'), match.group('body')

        return '', ''

    @staticmethod
    def _find_object_body(object_name, javascript):
        """Return the body of `var <object_name>={...};`, or ''."""
        object_name = object_name.replace('$', '\\$')
        match = re.search(r'var %s={(?P<object_body>.*?})};' % object_name, javascript, re.S)
        if match:
            return match.group('object_body')
        return ''

    def _get_object_function(self, object_name, function_name, javascript):
        """Return the parsed helper function dict for object.function.

        Results are memoized in self._object_cache; the whole object is
        parsed on first access. Raises KeyError if the function is not
        found in the object body.
        """
        if object_name not in self._object_cache:
            self._object_cache[object_name] = {}
        elif function_name in self._object_cache[object_name]:
            return self._object_cache[object_name][function_name]

        _object_body = self._find_object_body(object_name, javascript)
        # Split the object literal into individual `name:function(...){...}`
        # members; re-append the '}' consumed by the split.
        _object_body = _object_body.split('},')
        for _function in _object_body:
            if not _function.endswith('}'):
                _function = ''.join((_function, '}'))
            _function = _function.strip()

            match = re.match(r'(?P<name>[^:]*):function\((?P<parameter>[^)]*)\){(?P<body>[^}]+)}', _function)
            if match:
                name = match.group('name').replace('"', '')
                parameter = match.group('parameter')
                body = match.group('body').split(';')

                self._object_cache[object_name][name] = {'name': name,
                                                         'body': body,
                                                         'params': parameter}

        return self._object_cache[object_name][function_name]
|
||||
@@ -0,0 +1,69 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
|
||||
class JsonScriptEngine(object):
    """Interpreter for the cipher "json script" produced by Cipher.

    The script is a dict with an 'actions' list; each action names a
    builtin transform ('list', 'slice', 'splice', 'swap', 'reverse',
    'join') plus its parameters, with '%SIG%' standing in for the
    signature value being transformed.
    """

    def __init__(self, json_script):
        # Parsed script; may be shared/cached by the caller, so it must
        # never be mutated during execution.
        self._json_script = json_script

    def execute(self, signature):
        """Run all script actions against *signature* and return the result.

        Raises Exception for an action naming an unknown transform.
        """
        _signature = signature

        for action in self._json_script['actions']:
            func = ''.join(('_', action['func']))
            if func == '_return':
                break

            # Substitute the current signature for the '%SIG%'
            # placeholder WITHOUT mutating the stored action params:
            # the original in-place replacement made a second execute()
            # of a (function-)cached script see a stale signature.
            params = [_signature if param == '%SIG%' else param
                      for param in action['params']]

            # Default of None so an unknown transform reaches the
            # intended error instead of raising AttributeError.
            method = getattr(self, func, None)
            if method:
                _signature = method(*params)
            else:
                raise Exception('Unknown method: %s' % func)

        return _signature

    @staticmethod
    def _join(signature):
        """Join a list of characters back into a string."""
        return ''.join(signature)

    @staticmethod
    def _list(signature):
        """Convert the signature string into a list of characters."""
        return list(signature)

    @staticmethod
    def _slice(signature, b, *_ignored):
        """Truncate the signature to its first *b* elements.

        Cipher emits a third parameter for slice actions; it is unused
        here but accepted so the call does not raise a TypeError.
        """
        del signature[b:]
        return signature

    @staticmethod
    def _splice(signature, a, b):
        """Remove elements a..b-1 from the signature in place."""
        del signature[a:b]
        return signature

    @staticmethod
    def _reverse(signature):
        """Return the signature reversed."""
        return signature[::-1]

    @staticmethod
    def _swap(signature, b):
        """Swap the first element with the one at index *b* (mod length).

        The modulo is applied on both read and write: the original only
        normalised the read index, so b >= len(signature) raised an
        IndexError on assignment.
        """
        b %= len(signature)
        first = signature[0]
        signature[0] = signature[b]
        signature[b] = first
        return signature
|
||||
@@ -0,0 +1,292 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from re import compile as re_compile
|
||||
|
||||
from ...kodion import logging
|
||||
from ...kodion.compatibility import parse_qsl, unescape, urlencode, urlsplit
|
||||
from ...kodion.constants import YOUTUBE_HOSTNAMES
|
||||
from ...kodion.network import BaseRequestsClass
|
||||
|
||||
|
||||
class AbstractResolver(BaseRequestsClass):
    """Base class for URL resolvers.

    Subclasses implement supports_url() to report which HTTP method to
    probe a URL with (or False when unsupported), and resolve() to
    return the final URL.
    """

    # Browser-like request headers used when probing URLs.
    _HEADERS = {
        'Cache-Control': 'max-age=0',
        'Accept': ('text/html,'
                   'application/xhtml+xml,'
                   'application/xml;q=0.9,'
                   'image/webp,'
                   '*/*;q=0.8'),
        # Desktop user agent
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
                       ' AppleWebKit/537.36 (KHTML, like Gecko)'
                       ' Chrome/119.0.0.0 Safari/537.36'),
        # Mobile user agent - for testing m.youtube.com redirect
        # 'User-Agent': ('Mozilla/5.0 (Linux; Android 10; SM-G981B)'
        #                ' AppleWebKit/537.36 (KHTML, like Gecko)'
        #                ' Chrome/80.0.3987.162 Mobile Safari/537.36'),
        # Old desktop user agent - for testing /supported_browsers redirect
        # 'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64)'
        #                ' AppleWebKit/537.36 (KHTML, like Gecko)'
        #                ' Chrome/41.0.2272.118 Safari/537.36'),
        'DNT': '1',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'
    }

    def __init__(self, context):
        # Plugin context, also forwarded to the requests base class.
        self._context = context
        super(AbstractResolver, self).__init__(context=context)

    def supports_url(self, url, url_components):
        """Return the HTTP method to use for this URL, or False."""
        raise NotImplementedError()

    def resolve(self, url, url_components):
        """Return the resolved (final) URL for the given URL."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
class YouTubeResolver(AbstractResolver):
    """Resolver for YouTube-hosted URLs.

    Handles redirects, consent/supported_browsers interstitials, clips,
    watch_videos lists, music.youtube.com watch pages, and vanity
    channel URLs, rewriting them into URLs the plugin can handle.
    """

    # Extracts the canonical channel URL from a page's og:url meta tag.
    _RE_CHANNEL_URL = re_compile(r'<meta property="og:url" content="'
                                 r'(?P<channel_url>[^"]+)'
                                 r'">')
    # Alternating pattern for the four pieces of clip metadata; each
    # match populates at most one named group (see resolve()).
    _RE_CLIP_DETAILS = re_compile(r'(<meta property="og:video:url" content="'
                                  r'(?P<video_url>[^"]+)'
                                  r'">)'
                                  r'|(?P<is_clip>"clipConfig":\{)'
                                  r'|("startTimeMs":"(?P<start_time>\d+)")'
                                  r'|("endTimeMs":"(?P<end_time>\d+)")')
    # Extracts the real video ID embedded in music.youtube.com pages.
    _RE_MUSIC_VIDEO_ID = re_compile(r'"INITIAL_ENDPOINT":.+?videoId\\":\\"'
                                    r'(?P<video_id>[^\\"]+)'
                                    r'\\"')

    def __init__(self, *args, **kwargs):
        super(YouTubeResolver, self).__init__(*args, **kwargs)

    def supports_url(self, url, url_components):
        """Return 'GET'/'HEAD' for supported YouTube paths, else False.

        'GET' is returned when the page body must be inspected; 'HEAD'
        when only the redirect target is needed.
        """
        hostname = url_components.hostname
        if hostname not in YOUTUBE_HOSTNAMES:
            return False

        path = url_components.path.lower()
        if path.startswith((
            '/@',
            '/c/',
            '/channel/',
            '/clip',
            '/user/',
        )):
            return 'GET'

        if path.startswith((
            '/embed',
            '/live',
            '/redirect',
            '/shorts',
            '/supported_browsers',
        )):
            return 'HEAD'

        if path.startswith('/watch'):
            # Music watch pages embed the real video ID in the HTML.
            if hostname.startswith('music.'):
                return 'GET'
            return 'HEAD'

        # user channel in the form of youtube.com/username
        path = path.strip('/').split('/', 1)
        return 'GET' if len(path) == 1 and path[0] else False

    def resolve(self, url, url_components, method='HEAD'):
        """Resolve a YouTube URL to a directly usable URL.

        Returns the rewritten URL, or the input URL when resolution
        fails or no rewrite is needed.
        """
        path = url_components.path.rstrip('/').lower()
        if path == '/redirect':
            # External-link interstitial: target is in the 'q' param.
            params = dict(parse_qsl(url_components.query))
            url = params['q']

        # "sometimes", we get a redirect through a URL of the form
        # https://.../supported_browsers?next_url=<urlencoded_next_url>&further=parameters&stuck=here
        # put together query string from both what's encoded inside
        # next_url and the remaining parameters of this URL...
        elif path == '/supported_browsers':
            # top-level query string
            params = dict(parse_qsl(url_components.query))
            # components of next_url
            next_components = urlsplit(params.pop('next_url', ''))
            if not next_components.scheme or not next_components.netloc:
                return url
            # query string encoded inside next_url
            next_params = dict(parse_qsl(next_components.query))
            # add/overwrite all other params from top level query string
            next_params.update(params)
            # build new URL from these components
            return next_components._replace(
                query=urlencode(next_params)
            ).geturl()

        response = self.request(url,
                                method=method,
                                headers=self._HEADERS,
                                # Manually configured cookies to avoid cookie
                                # consent redirect
                                cookies={'SOCS': 'CAISAiAD'},
                                allow_redirects=True)
        if response is None:
            return url
        with response:
            if response.status_code >= 400:
                return url
            url = response.url
            # Body is only needed (and fetched) for GET probes.
            response_text = response.text if method == 'GET' else None

        if path.startswith('/clip'):
            # Collect the four clip details via a bitmask:
            # 1 = video URL, 2 = clipConfig marker, 4 = start, 8 = end.
            # Start/end are only accepted after the clipConfig marker.
            all_matches = self._RE_CLIP_DETAILS.finditer(response_text)
            matched_state = 0
            url_components = params = start_time = end_time = None
            for matches in all_matches:
                matches = matches.groupdict()

                if not matched_state & 1:
                    new_url = matches['video_url']
                    if new_url:
                        matched_state += 1
                        url_components = urlsplit(unescape(new_url))
                        params = dict(parse_qsl(url_components.query))

                if not matched_state & 2:
                    is_clip = matches['is_clip']
                    if is_clip:
                        matched_state += 2
                else:
                    if not matched_state & 4:
                        start_time = matches['start_time']
                        if start_time:
                            # Convert milliseconds to seconds.
                            start_time = int(start_time) / 1000
                            matched_state += 4

                    if not matched_state & 8:
                        end_time = matches['end_time']
                        if end_time:
                            end_time = int(end_time) / 1000
                            matched_state += 8

                if matched_state != 15:
                    continue

                # All four pieces found: rebuild the video URL with
                # clip boundaries attached.
                params.update((
                    ('clip', True),
                    ('start', start_time),
                    ('end', end_time),
                ))
                return url_components._replace(query=urlencode(params)).geturl()

        elif path == '/watch_videos':
            # Server turned the watch_videos list into a playlist URL;
            # merge the original query params onto it.
            params = dict(parse_qsl(url_components.query))
            new_components = urlsplit(url)
            new_params = dict(parse_qsl(new_components.query))
            # add/overwrite all other params from original query string
            new_params.update(params)
            # build new URL from these components
            return new_components._replace(
                query=urlencode(new_params)
            ).geturl()

        # try to extract the real videoId from the html content
        elif method == 'GET' and url_components.hostname.startswith('music.'):
            match = self._RE_MUSIC_VIDEO_ID.search(response_text)
            if match:
                params = dict(parse_qsl(url_components.query))
                params['v'] = match.group('video_id')
                return url_components._replace(
                    query=urlencode(params)
                ).geturl()

        # try to extract the channel id from the html content
        # With the channel id we can construct a URL we already work with
        # https://www.youtube.com/channel/<CHANNEL_ID>
        elif method == 'GET':
            match = self._RE_CHANNEL_URL.search(response_text)
            if match:
                new_url = match.group('channel_url')
                if path.endswith(('/live', '/streams')):
                    # Preserve the live intent on the canonical URL.
                    url_components = urlsplit(unescape(new_url))
                    params = dict(parse_qsl(url_components.query))
                    params['live'] = 1
                    return url_components._replace(
                        query=urlencode(params)
                    ).geturl()
                if new_url != 'undefined':
                    return new_url

        return url
|
||||
|
||||
|
||||
class CommonResolver(AbstractResolver):
    """Fallback resolver: follows redirects for any non-YouTube URL."""

    def __init__(self, *args, **kwargs):
        super(CommonResolver, self).__init__(*args, **kwargs)

    def supports_url(self, url, url_components):
        """Handle every hostname except YouTube's own; HEAD suffices."""
        if url_components.hostname in YOUTUBE_HOSTNAMES:
            return False
        return 'HEAD'

    def resolve(self, url, url_components, method='HEAD'):
        """Return the final URL after redirects, or the input on failure."""
        response = self.request(url,
                                allow_redirects=True,
                                headers=self._HEADERS,
                                method=method)
        if response is None:
            return url
        with response:
            succeeded = response.status_code < 400
            return response.url if succeeded else url
|
||||
|
||||
|
||||
class UrlResolver(object):
    """Resolves arbitrary URLs to their final destination, with caching."""

    log = logging.getLogger(__name__)

    def __init__(self, context):
        self._context = context
        # Order matters: generic redirect handling runs first, then the
        # YouTube-specific resolver works on the (possibly updated) URL.
        self._resolvers = (
            ('common_resolver', CommonResolver(context)),
            ('youtube_resolver', YouTubeResolver(context)),
        )

    def _resolve(self, url):
        """Pass the URL through every resolver that supports it."""
        current = url
        for name, resolver in self._resolvers:
            components = urlsplit(current)
            method = resolver.supports_url(current, components)
            if not method:
                continue

            self.log.debug('Resolving {uri!r} using {name} {method}',
                           uri=current,
                           name=name,
                           method=method)
            current = resolver.resolve(current, components, method)
            self.log.debug('Resolved to %r', current)
        return current

    def resolve(self, url):
        """Resolve *url* via the function cache; fall back to the input."""
        function_cache = self._context.get_function_cache()
        resolved = function_cache.run(
            self._resolve,
            function_cache.ONE_DAY,
            _refresh=self._context.refresh_requested(),
            url=url,
        )
        # Treat empty or degenerate results as a failed resolution.
        if resolved and resolved != '/':
            return resolved
        return url
|
||||
@@ -0,0 +1,335 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from re import (
|
||||
IGNORECASE,
|
||||
compile as re_compile,
|
||||
)
|
||||
|
||||
from . import utils
|
||||
from ...kodion import logging
|
||||
from ...kodion.compatibility import parse_qsl, urlsplit
|
||||
from ...kodion.constants import (
|
||||
CHANNEL_ID,
|
||||
CHANNEL_IDS,
|
||||
CLIP,
|
||||
END,
|
||||
LIVE,
|
||||
ORDER,
|
||||
PATHS,
|
||||
PLAYLIST_ID,
|
||||
PLAYLIST_IDS,
|
||||
SEEK,
|
||||
START,
|
||||
VIDEO_ID,
|
||||
VIDEO_IDS,
|
||||
YOUTUBE_HOSTNAMES,
|
||||
)
|
||||
from ...kodion.items import DirectoryItem, UriItem, VideoItem
|
||||
from ...kodion.utils.convert_format import duration_to_seconds
|
||||
|
||||
|
||||
class UrlToItemConverter(object):
    """Converts YouTube URLs into plugin list items.

    URLs are parsed one at a time (add_url / create_item or
    process_url); recognised video, playlist, and channel IDs are
    accumulated and later materialised as enriched items via
    get_items() and its helpers.
    """

    log = logging.getLogger(__name__)

    # Captures the trailing ID segment of channel/clip/embed-style
    # paths, e.g. '/channel/<id>' or '/@<handle>'.
    RE_PATH_ID = re_compile(r'/[^/]*?[/@](?P<id>[^/?#]+)', IGNORECASE)

    def __init__(self, flatten=True):
        # When True, playlist/channel URLs are collected as bare IDs
        # instead of individual directory items.
        self._flatten = flatten

        # video ID -> list of VideoItem placeholders awaiting details.
        self._video_id_dict = {}
        self._video_items = []

        # playlist ID -> items; plus flat list of IDs when flattening.
        self._playlist_id_dict = {}
        self._playlist_items = []
        self._playlist_ids = []

        # channel ID -> items; plus flat list of IDs when flattening.
        self._channel_id_dict = {}
        self._channel_items = []
        self._channel_ids = []
        self._channel_items_dict = {}

        # Parameters extracted by the most recent add_url() call.
        self._new_params = None

    def add_url(self, url):
        """Parse *url* into plugin parameters; return True on success.

        On success the extracted parameters are stored in
        self._new_params for a subsequent create_item() call.
        """
        parsed_url = urlsplit(url)
        if (not parsed_url.hostname
                or parsed_url.hostname.lower() not in YOUTUBE_HOSTNAMES):
            self.log.debug('Unknown hostname "{hostname}" in url "{url}"',
                           hostname=parsed_url.hostname,
                           url=url)
            return False

        url_params = dict(parse_qsl(parsed_url.query))
        # Map recognised query parameters to plugin parameter names,
        # converting time values to seconds where applicable.
        new_params = {
            new: process(url_params[old]) if process else url_params[old]
            for old, new, process in (
                ('end', END, duration_to_seconds),
                ('start', START, duration_to_seconds),
                ('t', SEEK, duration_to_seconds),
                ('list', PLAYLIST_ID, False),
                ('v', VIDEO_ID, False),
                ('live', LIVE, False),
                ('clip', CLIP, False),
                ('video_ids', VIDEO_IDS, False),
            )
            if old in url_params
        }

        path = parsed_url.path.rstrip('/').lower()
        if path.startswith(('/playlist', '/watch')):
            # IDs already come from the query string.
            pass
        elif path.startswith(('/c/', '/channel/', '/u/', '/user/', '/@')):
            re_match = self.RE_PATH_ID.match(parsed_url.path)
            new_params[CHANNEL_ID] = re_match.group('id')
            # NOTE(review): the literal 'live' key is used here while
            # the query mapping above uses the LIVE constant — confirm
            # both refer to the same parameter name.
            if ('live' not in new_params
                    and path.endswith(('/live', '/streams'))):
                new_params['live'] = 1
        elif path.startswith(('/clip/', '/embed/', '/live/', '/shorts/')):
            re_match = self.RE_PATH_ID.match(parsed_url.path)
            new_params[VIDEO_ID] = re_match.group('id')
        else:
            self.log.debug('Unknown path "{path}" in url "{url}"',
                           path=parsed_url.path,
                           url=url)
            self._new_params = None
            return False
        self._new_params = new_params
        return True

    def create_item(self, context, as_uri=False):
        """Create item(s) from the last add_url() parameters.

        :param context: plugin context used to build URIs.
        :param as_uri: when True, return just the plugin URI string.
        :return: the created item (or URI/ID), or None when the
            parameters yielded nothing.
        """
        new_params = self._new_params
        item = None

        if VIDEO_IDS in new_params:
            item_uri = context.create_uri(PATHS.PLAY, new_params)
            if as_uri:
                return item_uri

            # One placeholder item per listed video ID.
            for video_id in new_params[VIDEO_IDS].split(','):
                item = VideoItem(
                    name='',
                    uri=context.create_uri(
                        PATHS.PLAY,
                        dict(new_params, video_id=video_id),
                    ),
                    video_id=video_id,
                )
                items = self._video_id_dict.setdefault(video_id, [])
                items.append(item)

        elif VIDEO_ID in new_params:
            item_uri = context.create_uri(PATHS.PLAY, new_params)
            if as_uri:
                return item_uri

            video_id = new_params[VIDEO_ID]

            item = VideoItem(
                name='',
                uri=item_uri,
                video_id=video_id,
            )
            items = self._video_id_dict.setdefault(video_id, [])
            items.append(item)

        if PLAYLIST_ID in new_params:
            playlist_id = new_params[PLAYLIST_ID]

            item_uri = context.create_uri(
                (PATHS.PLAYLIST, playlist_id),
                new_params,
            )
            if as_uri:
                return item_uri

            if self._flatten:
                # Only collect the ID; items are built later in bulk.
                self._playlist_ids.append(playlist_id)
                return playlist_id

            item = DirectoryItem(
                name='',
                uri=item_uri,
                playlist_id=playlist_id,
            )
            items = self._playlist_id_dict.setdefault(playlist_id, [])
            items.append(item)

        if CHANNEL_ID in new_params:
            channel_id = new_params[CHANNEL_ID]
            live = new_params.get('live')

            # A live channel URL plays directly; otherwise browse it.
            item_uri = context.create_uri(
                PATHS.PLAY if live else (PATHS.CHANNEL, channel_id),
                new_params
            )
            if as_uri:
                return item_uri

            if not live and self._flatten:
                self._channel_ids.append(channel_id)
                return channel_id

            item = VideoItem(
                name='',
                uri=item_uri,
                channel_id=channel_id,
            ) if live else DirectoryItem(
                name='',
                uri=item_uri,
                channel_id=channel_id,
            )
            items = self._channel_id_dict.setdefault(channel_id, [])
            items.append(item)

        return item

    def process_url(self, url, context, as_uri=False):
        """Parse *url* and create its item; return the item or False."""
        if not self.add_url(url):
            return False
        item = self.create_item(context, as_uri=as_uri)
        if not item:
            self.log.debug('No items found in url "%s"', url)
        return item

    def process_urls(self, urls, context):
        """Process an iterable of URLs, ignoring per-URL results."""
        for url in urls:
            self.process_url(url, context)

    def get_items(self, provider, context, skip_title=False):
        """Return all accumulated items, ready for listing.

        Flattened channel/playlist IDs are exposed as single folder (or
        play-all) entries; individually collected items are enriched
        with their metadata first.
        """
        result = []
        query = context.get_param('q')

        if self._channel_ids:
            # A single folder entry linking to all collected channels.
            item_label = context.localize('channels')
            channels_item = DirectoryItem(
                context.get_ui().bold(item_label),
                context.create_uri(
                    (PATHS.SEARCH, 'links',),
                    {
                        CHANNEL_IDS: ','.join(self._channel_ids),
                        'q': query,
                    },
                ) if query else context.create_uri(
                    (PATHS.DESCRIPTION_LINKS,),
                    {
                        CHANNEL_IDS: ','.join(self._channel_ids),
                    },
                ),
                image='{media}/channels.png',
                category_label=item_label,
            )
            result.append(channels_item)

        if self._playlist_ids:
            if context.get_param('uri'):
                # Caller wants something playable: play all playlists.
                playlists_item = UriItem(
                    context.create_uri(
                        (PATHS.PLAY,),
                        {
                            PLAYLIST_IDS: ','.join(self._playlist_ids),
                            ORDER: 'normal',
                        },
                    ),
                    playable=True,
                )
            else:
                item_label = context.localize('playlists')
                playlists_item = DirectoryItem(
                    context.get_ui().bold(item_label),
                    context.create_uri(
                        (PATHS.SEARCH, 'links',),
                        {
                            PLAYLIST_IDS: ','.join(self._playlist_ids),
                            'q': query,
                        },
                    ) if query else context.create_uri(
                        (PATHS.DESCRIPTION_LINKS,),
                        {
                            PLAYLIST_IDS: ','.join(self._playlist_ids),
                        },
                    ),
                    image='{media}/playlist.png',
                    category_label=item_label,
                )
            result.append(playlists_item)

        if self._channel_id_dict:
            result += self.get_channel_items(provider, context, skip_title)

        if self._playlist_id_dict:
            result += self.get_playlist_items(provider, context, skip_title)

        if self._video_id_dict:
            result += self.get_video_items(provider, context, skip_title)

        return result

    def get_video_items(self, provider, context, skip_title=False):
        """Return enriched video items (memoized after first call)."""
        if self._video_items:
            return self._video_items

        utils.update_video_items(
            provider,
            context,
            self._video_id_dict,
            channel_items_dict=self._channel_items_dict,
        )
        utils.update_channel_info(provider, context, self._channel_items_dict)

        # Drop items that still have no title unless explicitly kept.
        self._video_items = [
            video_item
            for video_items in self._video_id_dict.values()
            for video_item in video_items
            if skip_title or video_item.get_name()
        ]
        return self._video_items

    def get_playlist_items(self, provider, context, skip_title=False):
        """Return enriched playlist items (memoized after first call)."""
        if self._playlist_items:
            return self._playlist_items

        utils.update_playlist_items(
            provider,
            context,
            self._playlist_id_dict,
            channel_items_dict=self._channel_items_dict,
        )
        utils.update_channel_info(provider, context, self._channel_items_dict)

        self._playlist_items = [
            playlist_item
            for playlist_items in self._playlist_id_dict.values()
            for playlist_item in playlist_items
            if skip_title or playlist_item.get_name()
        ]
        return self._playlist_items

    def get_channel_items(self, provider, context, skip_title=False):
        """Return enriched channel items (memoized after first call)."""
        if self._channel_items:
            return self._channel_items

        utils.update_channel_items(
            provider,
            context,
            self._channel_id_dict,
            channel_items_dict=self._channel_items_dict,
        )
        utils.update_channel_info(provider, context, self._channel_items_dict)

        self._channel_items = [
            channel_item
            for channel_items in self._channel_id_dict.values()
            for channel_item in channel_items
            if skip_title or channel_item.get_name()
        ]
        return self._channel_items
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,202 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from ..youtube_exceptions import LoginException
|
||||
from ...kodion import logging
|
||||
|
||||
|
||||
SIGN_IN = 'in'
|
||||
SIGN_OUT = 'out'
|
||||
|
||||
|
||||
def _do_logout(provider, context, client=None, **kwargs):
|
||||
ui = context.get_ui()
|
||||
if not context.get_param('confirmed') and not ui.on_yes_no_input(
|
||||
context.localize('sign.out'),
|
||||
context.localize('are_you_sure')
|
||||
):
|
||||
return False
|
||||
|
||||
if not client:
|
||||
client = provider.get_client(context)
|
||||
|
||||
access_manager = context.get_access_manager()
|
||||
addon_id = context.get_param('addon_id', None)
|
||||
|
||||
success = True
|
||||
refresh_tokens, num_refresh_tokens = access_manager.get_refresh_tokens()
|
||||
if num_refresh_tokens:
|
||||
for refresh_token in frozenset(refresh_tokens):
|
||||
try:
|
||||
if refresh_token:
|
||||
client.revoke(refresh_token)
|
||||
except LoginException:
|
||||
success = False
|
||||
|
||||
provider.reset_client(context=context, **kwargs)
|
||||
access_manager.update_access_token(
|
||||
addon_id, access_token='', expiry=-1, refresh_token='',
|
||||
)
|
||||
return success
|
||||
|
||||
|
||||
def _do_login(provider, context, client=None, **kwargs):
|
||||
if not client:
|
||||
client = provider.get_client(context)
|
||||
|
||||
access_manager = context.get_access_manager()
|
||||
addon_id = context.get_param('addon_id', None)
|
||||
localize = context.localize
|
||||
ui = context.get_ui()
|
||||
|
||||
ui.on_ok(localize('sign.multi.title'), localize('sign.multi.text'))
|
||||
|
||||
(
|
||||
access_tokens,
|
||||
num_access_tokens,
|
||||
expiry_timestamp,
|
||||
) = access_manager.get_access_tokens()
|
||||
(
|
||||
refresh_tokens,
|
||||
num_refresh_tokens,
|
||||
) = access_manager.get_refresh_tokens()
|
||||
token_types = ['tv', 'user', 'vr', 'dev']
|
||||
new_access_tokens = dict.fromkeys(token_types, None)
|
||||
for token_idx, token_type in enumerate(token_types):
|
||||
try:
|
||||
access_token = access_tokens[token_idx]
|
||||
refresh_token = refresh_tokens[token_idx]
|
||||
if access_token and refresh_token:
|
||||
new_access_tokens[token_type] = access_token
|
||||
new_token = (access_token, expiry_timestamp, refresh_token)
|
||||
token_types[token_idx] = new_token
|
||||
continue
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
new_token = ('', expiry_timestamp, '')
|
||||
try:
|
||||
json_data = client.request_device_and_user_code(token_idx)
|
||||
if not json_data:
|
||||
continue
|
||||
|
||||
interval = int(json_data.get('interval', 5))
|
||||
if interval > 60:
|
||||
interval = 5
|
||||
device_code = json_data['device_code']
|
||||
user_code = json_data['user_code']
|
||||
verification_url = json_data.get('verification_url')
|
||||
if verification_url:
|
||||
if verification_url.startswith('https://www.'):
|
||||
verification_url = verification_url[12:]
|
||||
else:
|
||||
verification_url = 'youtube.com/activate'
|
||||
|
||||
message = ''.join((
|
||||
localize('sign.go_to', ui.bold(verification_url)),
|
||||
'[CR]',
|
||||
localize('sign.enter_code'),
|
||||
' ',
|
||||
ui.bold(user_code),
|
||||
))
|
||||
|
||||
with ui.create_progress_dialog(
|
||||
heading=localize('sign.in'),
|
||||
message=message,
|
||||
background=False
|
||||
) as progress_dialog:
|
||||
steps = ((10 * 60) // interval) # 10 Minutes
|
||||
progress_dialog.set_total(steps)
|
||||
for _ in range(steps):
|
||||
progress_dialog.update()
|
||||
json_data = client.request_access_token(
|
||||
token_idx, device_code
|
||||
)
|
||||
if not json_data:
|
||||
break
|
||||
|
||||
log_data = json_data.copy()
|
||||
if 'access_token' in log_data:
|
||||
log_data['access_token'] = '<redacted>'
|
||||
if 'refresh_token' in log_data:
|
||||
log_data['refresh_token'] = '<redacted>'
|
||||
logging.debug('Requesting access token: {data!r}',
|
||||
data=log_data)
|
||||
|
||||
if 'error' not in json_data:
|
||||
access_token = json_data.get('access_token', '')
|
||||
refresh_token = json_data.get('refresh_token', '')
|
||||
if not access_token and not refresh_token:
|
||||
expiry = 0
|
||||
else:
|
||||
expiry = int(json_data.get('expires_in', 3600))
|
||||
new_token = (access_token, expiry, refresh_token)
|
||||
break
|
||||
|
||||
if json_data['error'] != 'authorization_pending':
|
||||
message = json_data['error']
|
||||
title = '%s: %s' % (context.get_name(), message)
|
||||
ui.show_notification(message, title)
|
||||
logging.error_trace('Access token request error - %s',
|
||||
message)
|
||||
break
|
||||
|
||||
if progress_dialog.is_aborted():
|
||||
break
|
||||
|
||||
context.sleep(interval)
|
||||
except LoginException:
|
||||
_do_logout(provider, context, client=client)
|
||||
break
|
||||
finally:
|
||||
new_access_tokens[token_type] = new_token[0]
|
||||
token_types[token_idx] = new_token
|
||||
logging.debug(('YouTube Login:',
|
||||
'Type: {token!r}',
|
||||
'Access token: {has_access_token!r}',
|
||||
'Expires: {expiry!r}',
|
||||
'Refresh token: {has_refresh_token!r}'),
|
||||
token=token_type,
|
||||
has_access_token=bool(new_token[0]),
|
||||
expiry=new_token[1],
|
||||
has_refresh_token=bool(new_token[2]))
|
||||
else:
|
||||
provider.reset_client(
|
||||
context=context,
|
||||
access_tokens=new_access_tokens,
|
||||
**kwargs
|
||||
)
|
||||
access_manager.update_access_token(addon_id, *zip(*token_types))
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def process(mode, provider, context, client=None, refresh=True, **kwargs):
|
||||
if mode == SIGN_OUT:
|
||||
signed_out = _do_logout(
|
||||
provider,
|
||||
context,
|
||||
client=client,
|
||||
**kwargs
|
||||
)
|
||||
return signed_out, {provider.FORCE_REFRESH: refresh}
|
||||
|
||||
if mode == SIGN_IN:
|
||||
signed_in = _do_login(
|
||||
provider,
|
||||
context,
|
||||
client=client,
|
||||
**kwargs
|
||||
)
|
||||
return signed_in, {provider.FORCE_REFRESH: refresh and signed_in}
|
||||
|
||||
return None, None
|
||||
@@ -0,0 +1,578 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import json
|
||||
import random
|
||||
from collections import defaultdict
|
||||
|
||||
from ..helper import utils, v3
|
||||
from ..youtube_exceptions import YouTubeException
|
||||
from ...kodion import logging
|
||||
from ...kodion.compatibility import string_type, urlencode, urlunsplit, xbmc
|
||||
from ...kodion.constants import (
|
||||
BUSY_FLAG,
|
||||
CHANNEL_ID,
|
||||
CONTENT,
|
||||
FORCE_PLAY_PARAMS,
|
||||
INCOGNITO,
|
||||
ORDER,
|
||||
PATHS,
|
||||
PLAYBACK_INIT,
|
||||
PLAYER_DATA,
|
||||
PLAYLIST_ID,
|
||||
PLAYLIST_IDS,
|
||||
PLAYLIST_PATH,
|
||||
PLAYLIST_POSITION,
|
||||
PLAY_FORCE_AUDIO,
|
||||
PLAY_PROMPT_QUALITY,
|
||||
PLAY_STRM,
|
||||
PLAY_USING,
|
||||
SCREENSAVER,
|
||||
SERVER_WAKEUP,
|
||||
TRAKT_PAUSE_FLAG,
|
||||
VIDEO_ID,
|
||||
VIDEO_IDS,
|
||||
)
|
||||
from ...kodion.items import AudioItem, UriItem, VideoItem
|
||||
from ...kodion.network import get_connect_address
|
||||
from ...kodion.utils.datetime import datetime_to_since
|
||||
from ...kodion.utils.redact import redact_params
|
||||
|
||||
|
||||
def _play_stream(provider, context):
|
||||
ui = context.get_ui()
|
||||
params = context.get_params()
|
||||
video_id = params.get(VIDEO_ID)
|
||||
if not video_id:
|
||||
ui.show_notification(context.localize('error.no_streams_found'))
|
||||
return False
|
||||
|
||||
client = provider.get_client(context)
|
||||
settings = context.get_settings()
|
||||
|
||||
incognito = params.get(INCOGNITO, False)
|
||||
screensaver = params.get(SCREENSAVER, False)
|
||||
|
||||
audio_only = False
|
||||
is_external = ui.get_property(PLAY_USING)
|
||||
if ((is_external and settings.alternative_player_web_urls())
|
||||
or settings.default_player_web_urls()):
|
||||
stream = {
|
||||
'url': 'https://youtu.be/{0}'.format(video_id),
|
||||
}
|
||||
yt_item = None
|
||||
else:
|
||||
ask_for_quality = settings.ask_for_video_quality()
|
||||
if ui.pop_property(PLAY_PROMPT_QUALITY) and not screensaver:
|
||||
ask_for_quality = True
|
||||
if ui.pop_property(PLAY_FORCE_AUDIO):
|
||||
audio_only = True
|
||||
else:
|
||||
audio_only = settings.audio_only()
|
||||
use_mpd = ((not is_external or settings.alternative_player_mpd())
|
||||
and settings.use_mpd_videos()
|
||||
and context.ipc_exec(SERVER_WAKEUP, timeout=5))
|
||||
|
||||
try:
|
||||
streams, yt_item = client.load_stream_info(
|
||||
video_id=video_id,
|
||||
ask_for_quality=ask_for_quality,
|
||||
audio_only=audio_only,
|
||||
incognito=incognito,
|
||||
use_mpd=use_mpd,
|
||||
)
|
||||
except YouTubeException as exc:
|
||||
logging.exception('Error')
|
||||
ui.show_notification(message=exc.get_message())
|
||||
return False
|
||||
|
||||
if not streams:
|
||||
ui.show_notification(context.localize('error.no_streams_found'))
|
||||
logging.debug('No streams found')
|
||||
return False
|
||||
|
||||
stream = _select_stream(
|
||||
context,
|
||||
streams,
|
||||
ask_for_quality=ask_for_quality,
|
||||
audio_only=audio_only,
|
||||
use_mpd=use_mpd,
|
||||
)
|
||||
if stream is None:
|
||||
return False
|
||||
|
||||
video_type = stream.get('video')
|
||||
if video_type and video_type.get('rtmpe'):
|
||||
ui.show_notification(context.localize('error.rtmpe_not_supported'))
|
||||
return False
|
||||
|
||||
if not screensaver and settings.get_bool(settings.PLAY_SUGGESTED):
|
||||
utils.add_related_video_to_playlist(provider,
|
||||
context,
|
||||
client,
|
||||
v3,
|
||||
video_id)
|
||||
|
||||
metadata = stream.get('meta', {})
|
||||
if is_external:
|
||||
url = urlunsplit((
|
||||
'http',
|
||||
get_connect_address(context=context, as_netloc=True),
|
||||
PATHS.REDIRECT,
|
||||
urlencode({'url': stream['url']}),
|
||||
'',
|
||||
))
|
||||
stream['url'] = url
|
||||
|
||||
media_item = (AudioItem if audio_only or not video_type else VideoItem)(
|
||||
name=metadata.get('title', ''),
|
||||
uri=stream['url'],
|
||||
video_id=video_id,
|
||||
)
|
||||
|
||||
use_history = not (screensaver or incognito or stream.get('live'))
|
||||
use_remote_history = use_history and settings.use_remote_history()
|
||||
use_local_history = use_history and settings.use_local_history()
|
||||
|
||||
utils.update_play_info(
|
||||
provider, context, video_id, media_item, stream, yt_item
|
||||
)
|
||||
|
||||
seek_time = 0.0 if params.get('resume') else params.get('seek', 0.0)
|
||||
start_time = params.get('start', 0.0)
|
||||
end_time = params.get('end', 0.0)
|
||||
|
||||
if start_time:
|
||||
media_item.set_start_time(start_time)
|
||||
# Setting the duration based on end_time can cause issues with
|
||||
# listing/sorting and other addons that monitor playback
|
||||
# if end_time:
|
||||
# video_item.set_duration_from_seconds(end_time)
|
||||
|
||||
play_count = use_local_history and media_item.get_play_count() or 0
|
||||
playback_stats = stream.get('playback_stats')
|
||||
|
||||
playback_data = {
|
||||
VIDEO_ID: video_id,
|
||||
CHANNEL_ID: metadata.get('channel', {}).get('id', ''),
|
||||
'video_status': metadata.get('status', {}),
|
||||
'playing_file': media_item.get_uri(),
|
||||
'play_count': play_count,
|
||||
'use_remote_history': use_remote_history,
|
||||
'use_local_history': use_local_history,
|
||||
'playback_stats': playback_stats,
|
||||
'seek_time': seek_time,
|
||||
'start_time': start_time,
|
||||
'end_time': end_time,
|
||||
'clip': params.get('clip', False),
|
||||
'refresh_only': screensaver
|
||||
}
|
||||
|
||||
ui.set_property(PLAYER_DATA,
|
||||
value=playback_data,
|
||||
process=json.dumps,
|
||||
log_process=redact_params)
|
||||
ui.set_property(TRAKT_PAUSE_FLAG, raw=True)
|
||||
context.send_notification(PLAYBACK_INIT, playback_data)
|
||||
return media_item
|
||||
|
||||
|
||||
def _play_playlist(provider, context):
|
||||
video_items = []
|
||||
params = context.get_params()
|
||||
|
||||
action = params.get('action')
|
||||
if not action and context.get_handle() == -1:
|
||||
action = 'play'
|
||||
|
||||
playlist_ids = params.get(PLAYLIST_IDS)
|
||||
if not playlist_ids:
|
||||
playlist_id = params.get(PLAYLIST_ID)
|
||||
if playlist_id:
|
||||
playlist_ids = [playlist_id]
|
||||
|
||||
video_ids = params.get(VIDEO_IDS)
|
||||
if not playlist_ids and not video_ids:
|
||||
video_id = params.get(VIDEO_ID)
|
||||
if video_id:
|
||||
video_ids = [video_id]
|
||||
else:
|
||||
logging.warning_trace('No playlist found to play')
|
||||
return False, None
|
||||
|
||||
resource_manager = provider.get_resource_manager(context)
|
||||
ui = context.get_ui()
|
||||
|
||||
with ui.create_progress_dialog(
|
||||
heading=context.localize('playlist.progress.updating'),
|
||||
message=context.localize('please_wait'),
|
||||
background=True,
|
||||
) as progress_dialog:
|
||||
if playlist_ids:
|
||||
json_data = resource_manager.get_playlist_items(playlist_ids)
|
||||
if not json_data:
|
||||
return False, None
|
||||
chunks = json_data.values()
|
||||
total = sum(len(chunk.get('items', [])) for chunk in chunks)
|
||||
elif video_ids:
|
||||
json_data = resource_manager.get_videos(video_ids,
|
||||
live_details=True)
|
||||
if not json_data:
|
||||
return False, None
|
||||
chunks = [{
|
||||
'kind': 'plugin#playlistItemListResponse',
|
||||
'items': json_data.values(),
|
||||
}]
|
||||
total = len(json_data)
|
||||
|
||||
progress_dialog.reset_total(total)
|
||||
|
||||
# start the loop and fill the list with video items
|
||||
for chunk in chunks:
|
||||
result = v3.response_to_items(provider,
|
||||
context,
|
||||
chunk,
|
||||
process_next_page=False,
|
||||
hide_progress=True)
|
||||
video_items.extend(result)
|
||||
|
||||
progress_dialog.update(steps=len(result))
|
||||
|
||||
if not video_items:
|
||||
return False, None
|
||||
|
||||
result = process_items_for_playlist(context, video_items, action=action)
|
||||
if action == 'list':
|
||||
options = {
|
||||
provider.CACHE_TO_DISC: True,
|
||||
provider.FORCE_RESOLVE: False,
|
||||
provider.UPDATE_LISTING: False,
|
||||
provider.CONTENT_TYPE: {
|
||||
'content_type': CONTENT.VIDEO_CONTENT,
|
||||
'sub_type': None,
|
||||
'category_label': None,
|
||||
},
|
||||
}
|
||||
else:
|
||||
options = {
|
||||
provider.CACHE_TO_DISC: False,
|
||||
provider.FORCE_RESOLVE: True,
|
||||
provider.UPDATE_LISTING: True,
|
||||
}
|
||||
return result, options
|
||||
|
||||
|
||||
def _play_channel_live(provider, context):
|
||||
channel_id = context.get_param(CHANNEL_ID)
|
||||
_, json_data = provider.get_client(context).search_with_params(params={
|
||||
'type': 'video',
|
||||
'eventType': 'live',
|
||||
'channelId': channel_id,
|
||||
'safeSearch': 'none',
|
||||
})
|
||||
if not json_data:
|
||||
return False
|
||||
|
||||
channel_streams = v3.response_to_items(provider,
|
||||
context,
|
||||
json_data,
|
||||
process_next_page=False)
|
||||
if not channel_streams:
|
||||
return False
|
||||
|
||||
return (
|
||||
process_items_for_playlist(
|
||||
context,
|
||||
channel_streams,
|
||||
action='play' if context.get_handle() == -1 else None,
|
||||
play_from=context.get_param('live', 1),
|
||||
),
|
||||
{
|
||||
provider.CACHE_TO_DISC: False,
|
||||
provider.FORCE_RESOLVE: True,
|
||||
provider.UPDATE_LISTING: True,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def _select_stream(context,
|
||||
stream_data_list,
|
||||
ask_for_quality,
|
||||
audio_only,
|
||||
use_mpd=True):
|
||||
settings = context.get_settings()
|
||||
if settings.use_isa():
|
||||
isa_capabilities = context.inputstream_adaptive_capabilities()
|
||||
use_adaptive = bool(isa_capabilities)
|
||||
use_live_adaptive = use_adaptive and 'live' in isa_capabilities
|
||||
use_live_mpd = use_live_adaptive and settings.use_mpd_live_streams()
|
||||
else:
|
||||
use_adaptive = False
|
||||
use_live_adaptive = False
|
||||
use_live_mpd = False
|
||||
|
||||
if audio_only:
|
||||
logging.debug('Audio only')
|
||||
stream_list = [item for item in stream_data_list
|
||||
if 'video' not in item]
|
||||
else:
|
||||
stream_list = [
|
||||
item for item in stream_data_list
|
||||
if (not item.get('adaptive')
|
||||
or (not item.get('live')
|
||||
and ((use_mpd and item.get('dash/video'))
|
||||
or (use_adaptive and item.get('hls/video'))))
|
||||
or (item.get('live')
|
||||
and ((use_live_mpd and item.get('dash/video'))
|
||||
or (use_live_adaptive and item.get('hls/video')))))
|
||||
]
|
||||
|
||||
if not stream_list:
|
||||
logging.debug('No streams found')
|
||||
return None
|
||||
|
||||
def _stream_sort(_stream):
|
||||
return _stream.get('sort', [0, 0, 0])
|
||||
|
||||
stream_list.sort(key=_stream_sort, reverse=True)
|
||||
num_streams = len(stream_list)
|
||||
|
||||
if logging.debugging:
|
||||
def _default_NA():
|
||||
return 'N/A'
|
||||
|
||||
logging.debug('%d available stream(s)', num_streams)
|
||||
for idx, stream in enumerate(stream_list):
|
||||
logging.debug(('Stream {idx}',
|
||||
'Container: {stream[container]}',
|
||||
'Adaptive: {stream[adaptive]}',
|
||||
'Audio: {stream[audio]}',
|
||||
'Video: {stream[video]}',
|
||||
'Sort: {stream[sort]}'),
|
||||
idx=idx,
|
||||
stream=defaultdict(_default_NA, stream))
|
||||
|
||||
if ask_for_quality and num_streams > 1:
|
||||
selected_stream = context.get_ui().on_select(
|
||||
context.localize('select_video_quality'),
|
||||
[stream['title'] for stream in stream_list],
|
||||
)
|
||||
if selected_stream == -1:
|
||||
logging.debug('No stream selected')
|
||||
return None
|
||||
else:
|
||||
selected_stream = 0
|
||||
|
||||
logging.debug('Stream %d selected', selected_stream)
|
||||
return stream_list[selected_stream]
|
||||
|
||||
|
||||
def process_items_for_playlist(context,
|
||||
items,
|
||||
action=None,
|
||||
play_from=None,
|
||||
order=None,
|
||||
recent_days=None):
|
||||
params = context.get_params()
|
||||
|
||||
if play_from is None:
|
||||
play_from = params.get(VIDEO_ID)
|
||||
|
||||
if recent_days is None:
|
||||
recent_days = params.get('recent_days')
|
||||
|
||||
num_items = len(items) if items else 0
|
||||
if num_items > 1:
|
||||
# select order
|
||||
if order is None:
|
||||
order = params.get(ORDER)
|
||||
if not order and play_from is None and recent_days is None:
|
||||
order = 'ask'
|
||||
if order == 'ask':
|
||||
order_list = ('default', 'reverse', 'shuffle')
|
||||
selection_list = [
|
||||
(context.localize('playlist.play.%s' % order), order)
|
||||
for order in order_list
|
||||
]
|
||||
order = context.get_ui().on_select(
|
||||
context.localize('playlist.play.select'),
|
||||
selection_list,
|
||||
)
|
||||
if order not in order_list:
|
||||
order = 'default'
|
||||
|
||||
# reverse the list
|
||||
if order == 'reverse':
|
||||
items = items[::-1]
|
||||
elif order == 'shuffle':
|
||||
# we have to shuffle the playlist by our self.
|
||||
# The implementation of XBMC/KODI is quite weak :(
|
||||
random.shuffle(items)
|
||||
elif not num_items:
|
||||
return False
|
||||
|
||||
if action == 'list':
|
||||
return items
|
||||
|
||||
# stop and clear the playlist
|
||||
playlist_player = context.get_playlist_player()
|
||||
playlist_player.clear()
|
||||
playlist_player.unshuffle()
|
||||
|
||||
# check if we have a video as starting point for the playlist
|
||||
if play_from == 'start':
|
||||
play_from = 0
|
||||
elif play_from == 'end':
|
||||
play_from = -1
|
||||
if isinstance(play_from, int):
|
||||
position = play_from
|
||||
elif isinstance(play_from, string_type):
|
||||
position = None
|
||||
else:
|
||||
position = False
|
||||
|
||||
# add videos to playlist
|
||||
num_items = 0
|
||||
# convert from days to seconds
|
||||
recent_limit = recent_days * 24 * 60 * 60 if recent_days else None
|
||||
for idx, item in enumerate(items):
|
||||
if not item.playable:
|
||||
continue
|
||||
if (recent_limit and datetime_to_since(
|
||||
context,
|
||||
item.get_dateadded(),
|
||||
as_seconds=True,
|
||||
) > recent_limit):
|
||||
continue
|
||||
playlist_player.add(item)
|
||||
num_items += 1
|
||||
if position is None and item.video_id == play_from:
|
||||
position = num_items
|
||||
|
||||
if not num_items:
|
||||
return False
|
||||
|
||||
if isinstance(play_from, int):
|
||||
if num_items >= play_from > 0:
|
||||
position = play_from
|
||||
elif play_from < 0:
|
||||
position = num_items + play_from
|
||||
else:
|
||||
position = 1
|
||||
elif not position:
|
||||
position = 1
|
||||
|
||||
if action == 'queue':
|
||||
return items
|
||||
if action == 'play':
|
||||
ui = context.get_ui()
|
||||
timeout = position
|
||||
while ui.busy_dialog_active() or playlist_player.size() < position:
|
||||
timeout -= 1
|
||||
if timeout < 0:
|
||||
command = playlist_player.play_playlist_item(position,
|
||||
defer=True)
|
||||
return UriItem(command)
|
||||
context.sleep(1)
|
||||
else:
|
||||
playlist_player.play_playlist_item(position)
|
||||
return items[position - 1]
|
||||
|
||||
|
||||
def process(provider, context, **_kwargs):
|
||||
"""
|
||||
Plays a video, playlist, or channel live stream.
|
||||
|
||||
Video:
|
||||
plugin://plugin.video.youtube/play/?video_id=<VIDEO_ID>
|
||||
|
||||
* VIDEO_ID: YouTube Video ID
|
||||
|
||||
Playlist:
|
||||
plugin://plugin.video.youtube/play/?playlist_id=<PLAYLIST_ID>[&order=<ORDER>][&action=<ACTION>]
|
||||
|
||||
* PLAYLIST_ID: YouTube Playlist ID
|
||||
* ORDER: [ask(default)|normal|reverse|shuffle] optional playlist order
|
||||
* ACTION: [list|play|queue|None(default)] optional action to perform
|
||||
|
||||
Channel live streams:
|
||||
plugin://plugin.video.youtube/play/?channel_id=<CHANNEL_ID>[&live=X]
|
||||
|
||||
* CHANNEL_ID: YouTube Channel ID
|
||||
* X: optional index of live stream to play if channel has multiple live streams. 1 (default) for first live stream
|
||||
"""
|
||||
ui = context.get_ui()
|
||||
|
||||
params = context.get_params()
|
||||
param_keys = params.keys()
|
||||
|
||||
if ({CHANNEL_ID, PLAYLIST_ID, PLAYLIST_IDS, VIDEO_ID, VIDEO_IDS}
|
||||
.isdisjoint(param_keys)):
|
||||
item_ids = context.parse_item_ids()
|
||||
if item_ids and VIDEO_ID in item_ids:
|
||||
context.set_params(**item_ids)
|
||||
else:
|
||||
return False
|
||||
|
||||
video_id = params.get(VIDEO_ID)
|
||||
video_ids = params.get(VIDEO_IDS)
|
||||
playlist_id = params.get(PLAYLIST_ID)
|
||||
|
||||
force_play_params = FORCE_PLAY_PARAMS.intersection(param_keys)
|
||||
|
||||
if video_id and not playlist_id and not video_ids:
|
||||
for param in force_play_params:
|
||||
del params[param]
|
||||
ui.set_property(param)
|
||||
|
||||
if context.get_handle() == -1:
|
||||
# This is required to trigger Kodi resume prompt, along with using
|
||||
# RunPlugin. Prompt will not be used if using PlayMedia
|
||||
if force_play_params and not params.get(PLAY_STRM):
|
||||
return UriItem('command://Action(Play)')
|
||||
|
||||
return UriItem('command://{0}'.format(
|
||||
context.create_uri(
|
||||
(PATHS.PLAY,),
|
||||
params,
|
||||
play=(xbmc.PLAYLIST_MUSIC
|
||||
if (ui.get_property(PLAY_FORCE_AUDIO)
|
||||
or context.get_settings().audio_only()) else
|
||||
xbmc.PLAYLIST_VIDEO),
|
||||
)
|
||||
))
|
||||
|
||||
if not context.get_system_version().compatible(22):
|
||||
ui.set_property(BUSY_FLAG)
|
||||
|
||||
media_item = _play_stream(provider, context)
|
||||
if media_item:
|
||||
playlist_player = context.get_playlist_player()
|
||||
position, _ = playlist_player.get_position()
|
||||
if position:
|
||||
item_uri = playlist_player.get_item_path(position - 1)
|
||||
if item_uri:
|
||||
ui.set_property(PLAYLIST_PATH, item_uri)
|
||||
ui.set_property(PLAYLIST_POSITION, str(position))
|
||||
else:
|
||||
ui.clear_property(BUSY_FLAG)
|
||||
for param in force_play_params:
|
||||
ui.clear_property(param)
|
||||
|
||||
return media_item
|
||||
|
||||
if playlist_id or video_ids or PLAYLIST_IDS in params:
|
||||
return _play_playlist(provider, context)
|
||||
|
||||
if CHANNEL_ID in params:
|
||||
return _play_channel_live(provider, context)
|
||||
return False
|
||||
@@ -0,0 +1,524 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from .utils import get_thumbnail
|
||||
from ...kodion import KodionException, logging
|
||||
from ...kodion.constants import (
|
||||
CHANNEL_ID,
|
||||
CONTEXT_MENU,
|
||||
KEYMAP,
|
||||
FOLDER_URI,
|
||||
PATHS,
|
||||
PLAYLIST_ID,
|
||||
PLAYLIST_ITEM_ID,
|
||||
TITLE,
|
||||
URI,
|
||||
VIDEO_ID,
|
||||
)
|
||||
|
||||
|
||||
def _process_add_video(provider, context):
|
||||
ui = context.get_ui()
|
||||
li_path = ui.get_listitem_info(URI)
|
||||
li_video_id = ui.get_listitem_property(VIDEO_ID)
|
||||
|
||||
client = provider.get_client(context)
|
||||
if not client.logged_in:
|
||||
raise KodionException('Playlist/Add: not logged in')
|
||||
|
||||
params = context.get_params()
|
||||
|
||||
playlist_id = params.get(PLAYLIST_ID)
|
||||
if not playlist_id:
|
||||
raise KodionException('Playlist/Add: missing playlist_id')
|
||||
elif playlist_id == 'watch_later':
|
||||
playlist_id = context.get_access_manager().get_watch_later_id()
|
||||
|
||||
video_id = params.get(VIDEO_ID, li_video_id)
|
||||
if not video_id:
|
||||
video_id = context.parse_item_ids(li_path).get(VIDEO_ID)
|
||||
if not video_id:
|
||||
raise KodionException('Playlist/Add: missing video_id')
|
||||
|
||||
localize = context.localize
|
||||
success = client.add_video_to_playlist(playlist_id, video_id)
|
||||
if not success:
|
||||
logging.debug('Playlist/Add: failed for playlist {playlist_id!r}'
|
||||
.format(playlist_id=playlist_id))
|
||||
ui.show_notification(
|
||||
message=localize(('failed.x', ('add.to.x', 'playlist'))),
|
||||
time_ms=2500,
|
||||
audible=False,
|
||||
)
|
||||
return False, {provider.FORCE_RETURN: True}
|
||||
|
||||
ui.show_notification(
|
||||
message=localize(('added.to.x', 'playlist')),
|
||||
time_ms=2500,
|
||||
audible=False,
|
||||
)
|
||||
|
||||
if params.get(KEYMAP) or not params.get(CONTEXT_MENU):
|
||||
ui.set_focus_next_item()
|
||||
|
||||
data_cache = context.get_data_cache()
|
||||
playlist_cache = data_cache.get_item_like(','.join((playlist_id, '%')))
|
||||
if playlist_cache:
|
||||
cache_key, _, cached_last_page = playlist_cache[0]
|
||||
if cached_last_page:
|
||||
data_cache.update_item(cache_key, None)
|
||||
|
||||
return True, {provider.FORCE_RETURN: True}
|
||||
|
||||
|
||||
def _process_remove_video(provider,
|
||||
context,
|
||||
playlist_id=None,
|
||||
playlist_item_id=None,
|
||||
video_id=None,
|
||||
video_name=None,
|
||||
confirmed=None):
|
||||
ui = context.get_ui()
|
||||
container_uri = ui.get_container_info(FOLDER_URI)
|
||||
li_playlist_id = ui.get_listitem_property(PLAYLIST_ID)
|
||||
li_playlist_item_id = ui.get_listitem_property(PLAYLIST_ITEM_ID)
|
||||
li_video_id = ui.get_listitem_property(VIDEO_ID)
|
||||
li_video_name = ui.get_listitem_info(TITLE)
|
||||
|
||||
client = provider.get_client(context)
|
||||
if not client.logged_in:
|
||||
raise KodionException('Playlist/Remove: not logged in')
|
||||
|
||||
params = context.get_params()
|
||||
|
||||
if playlist_id is None:
|
||||
playlist_id = params.pop(PLAYLIST_ID, li_playlist_id)
|
||||
if playlist_item_id is None:
|
||||
playlist_item_id = params.pop(PLAYLIST_ITEM_ID,
|
||||
li_playlist_item_id)
|
||||
if video_id is None:
|
||||
video_id = params.pop(VIDEO_ID, li_video_id)
|
||||
if video_name is None:
|
||||
video_name = params.pop('item_name', li_video_name)
|
||||
if confirmed is None:
|
||||
confirmed = params.pop('confirmed', False)
|
||||
|
||||
if not playlist_id:
|
||||
if confirmed:
|
||||
return False
|
||||
raise KodionException('Playlist/Remove: missing playlist ID')
|
||||
elif playlist_id == 'watch_later':
|
||||
playlist_id = context.get_access_manager().get_watch_later_id()
|
||||
elif playlist_id.lower() == 'hl':
|
||||
logging.debug('Playlist/Remove: failed for playlist {playlist_id!r}'
|
||||
.format(playlist_id=playlist_id))
|
||||
return False
|
||||
|
||||
localize = context.localize
|
||||
if confirmed or ui.on_remove_content(video_name):
|
||||
success = provider.get_client(context).remove_video_from_playlist(
|
||||
playlist_id=playlist_id,
|
||||
playlist_item_id=playlist_item_id,
|
||||
video_id=video_id,
|
||||
)
|
||||
if not success:
|
||||
ui.show_notification(
|
||||
message=localize(('failed.x', ('remove.from.x', 'playlist'))),
|
||||
time_ms=2500,
|
||||
audible=False,
|
||||
)
|
||||
return False
|
||||
|
||||
if not confirmed:
|
||||
ui.show_notification(
|
||||
message=localize(('removed.from.x', 'playlist')),
|
||||
time_ms=2500,
|
||||
audible=False,
|
||||
)
|
||||
|
||||
if not container_uri:
|
||||
return True
|
||||
|
||||
if params.get(KEYMAP) or not params.get(CONTEXT_MENU):
|
||||
ui.set_focus_next_item()
|
||||
|
||||
path, params = context.parse_uri(container_uri)
|
||||
if path.rstrip('/').endswith('/'.join((PATHS.PLAYLIST, playlist_id))):
|
||||
if 'refresh' not in params:
|
||||
params['refresh'] = True
|
||||
else:
|
||||
path = params.pop('reload_path', False if confirmed else None)
|
||||
|
||||
if path is not False:
|
||||
provider.reroute(
|
||||
context,
|
||||
path=path,
|
||||
params=params,
|
||||
)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _process_remove_playlist(provider, context):
    """Delete a whole playlist after user confirmation.

    Resolves the playlist from plugin params, falling back to the focused
    list item. Returns a (success, options) tuple; on success the options
    dict forces a container refresh.
    """
    ui = context.get_ui()
    # Focused-listitem fallbacks for when params don't carry the details.
    channel_id = ui.get_listitem_property(CHANNEL_ID)
    li_playlist_id = ui.get_listitem_property(PLAYLIST_ID)
    li_playlist_name = ui.get_listitem_info(TITLE)

    params = context.get_params()
    localize = context.localize

    playlist_id = params.get(PLAYLIST_ID, li_playlist_id)
    if not playlist_id:
        raise KodionException('Playlist/Remove: missing playlist_id')

    # The name is required for the confirmation prompt.
    playlist_name = params.get('item_name', li_playlist_name)
    if not playlist_name:
        raise KodionException('Playlist/Remove: missing item_name')

    if ui.on_delete_content(playlist_name):
        success = provider.get_client(context).remove_playlist(playlist_id)
        if not success:
            ui.show_notification(
                message=localize(('failed.x', ('remove.x', 'playlist'))),
                time_ms=2500,
                audible=False,
            )
            return False, None

        ui.show_notification(
            message=localize('removed.name.x', playlist_name),
            time_ms=2500,
            audible=False,
        )

        # Invalidate cached channel data so the deleted playlist disappears
        # from the channel's cached listing.
        if channel_id:
            data_cache = context.get_data_cache()
            data_cache.del_item(channel_id)
        return True, {provider.FORCE_REFRESH: True}

    # User declined the confirmation dialog.
    return False, None
|
||||
|
||||
|
||||
def _process_select_playlist(provider, context):
    """Show a paginated playlist picker and add the current video to the
    chosen playlist.

    The picker offers: a "create new playlist" entry, the Watch Later
    playlist, a custom History playlist (when one is configured), the
    user's own playlists page by page, and a "next page" entry while more
    pages exist. Selecting a playlist delegates to _process_add_video with
    the chosen playlist_id.
    """
    ui = context.get_ui()
    li_path = ui.get_listitem_info(URI)
    li_video_id = ui.get_listitem_property(VIDEO_ID)

    params = context.get_params()
    page_token = ''
    current_page = 0

    video_id = params.get(VIDEO_ID, li_video_id)
    if not video_id:
        # Last resort: try to parse a video id out of the list item's URI.
        item_ids = context.parse_item_ids(li_path)
        if item_ids and VIDEO_ID in item_ids:
            context.set_params(**item_ids)
        else:
            raise KodionException('Playlist/Select: missing video_id')

    function_cache = context.get_function_cache()
    client = provider.get_client(context)
    resource_manager = provider.get_resource_manager(context)

    # Resolve the 'Watch Later' playlist id, preferring the account's
    # related playlists, then the locally configured id.
    playlists = resource_manager.get_related_playlists('mine')
    if playlists and 'watchLater' in playlists:
        watch_later_id = playlists['watchLater'] or 'WL'
    else:
        watch_later_id = context.get_access_manager().get_watch_later_id()

    # Resolve the 'History' playlist id the same way.
    if playlists and 'watchHistory' in playlists:
        watch_history_id = playlists['watchHistory'] or 'HL'
    else:
        watch_history_id = context.get_access_manager().get_watch_history_id()

    # These two are listed explicitly, so skip them in the regular results.
    account_playlists = {watch_later_id, watch_history_id}

    thumb_size = context.get_settings().get_thumbnail_size()
    default_thumb = context.create_resource_path('media', 'playlist.png')

    while 1:
        current_page += 1
        # Short-lived cache (~20s) so rapid re-opens don't refetch.
        json_data = function_cache.run(
            client.get_playlists_of_channel,
            function_cache.ONE_MINUTE // 3,
            _refresh=context.refresh_requested(),
            channel_id='mine',
            page_token=page_token,
        )
        if not json_data:
            break
        playlists = json_data.get('items', [])
        page_token = json_data.get('nextPageToken', '')

        # Each entry: (label, description, selection key, thumbnail).
        items = []
        if current_page == 1:
            # Create a new playlist
            items.append((
                ui.bold(context.localize('playlist.create')), '',
                'playlist.create',
                default_thumb,
            ))

            # Add the 'Watch Later' playlist
            if watch_later_id:
                items.append((
                    ui.bold(context.localize('watch_later')), '',
                    watch_later_id,
                    context.create_resource_path('media', 'watch_later.png')
                ))

            # Add the custom 'History' playlist
            # Can't directly add items to the YouTube Watch History list
            if watch_history_id and watch_history_id.upper() != 'HL':
                items.append((
                    ui.bold(context.localize('history')), '',
                    watch_history_id,
                    context.create_resource_path('media', 'history.png')
                ))

        for playlist in playlists:
            playlist_id = playlist.get('id')
            if playlist_id in account_playlists:
                continue
            snippet = playlist.get('snippet', {})
            title = snippet.get('title')
            if title and playlist_id:
                items.append((
                    title,
                    snippet.get('description', ''),
                    playlist_id,
                    get_thumbnail(
                        thumb_size, snippet.get('thumbnails'), default_thumb
                    )
                ))

        if page_token:
            next_page = current_page + 1
            items.append((
                ui.bold(context.localize('page.next', next_page)), '',
                'playlist.next',
                'DefaultFolder.png',
            ))

        playlist_id = None
        result = ui.on_select(context.localize('playlist.select'), items)
        if result == 'playlist.next':
            # Fetch and show the next page of playlists.
            continue
        elif result == 'playlist.create':
            result, text = ui.on_keyboard_input(
                context.localize('playlist.create'))
            if result and text:
                json_data = client.create_playlist(title=text)
                if not json_data:
                    break
                playlist_id = json_data.get('id', '')
        elif result != -1:
            # A concrete playlist id was selected (-1 means cancelled).
            playlist_id = result

        if playlist_id:
            # Re-run the add-video flow against the chosen playlist.
            new_params = dict(params, playlist_id=playlist_id)
            new_context = context.clone(new_params=new_params)
            _process_add_video(provider, new_context)
            break
|
||||
|
||||
|
||||
def _process_rename_playlist(provider, context):
    """Prompt for a new title and rename the selected playlist.

    Returns a (success, options) tuple; on success the options dict forces
    a container refresh so the new title is shown.
    """
    ui = context.get_ui()
    # Focused-listitem fallbacks for when params don't carry the details.
    li_playlist_id = ui.get_listitem_property(PLAYLIST_ID)
    li_playlist_name = ui.get_listitem_info(TITLE)

    params = context.get_params()
    localize = context.localize

    playlist_id = params.get(PLAYLIST_ID, li_playlist_id)
    if not playlist_id:
        raise KodionException('Playlist/Rename: missing playlist_id')

    # Pre-fill the keyboard with the current name.
    result, text = ui.on_keyboard_input(
        localize('rename'),
        default=params.get('item_name', li_playlist_name),
    )
    if not result or not text:
        # Input cancelled or empty: nothing to do.
        return False, None

    success = provider.get_client(context).rename_playlist(
        playlist_id=playlist_id, new_title=text,
    )
    if not success:
        ui.show_notification(
            message=localize(('failed.x', ('rename', 'playlist'))),
            time_ms=2500,
            audible=False,
        )
        return False, None

    ui.show_notification(
        message=localize('succeeded'),
        time_ms=2500,
        audible=False,
    )

    # Drop cached metadata for the renamed playlist before refreshing.
    data_cache = context.get_data_cache()
    data_cache.del_item(playlist_id)
    return True, {provider.FORCE_REFRESH: True}
|
||||
|
||||
|
||||
def _playlist_id_change(context, playlist, command):
    """Assign or clear the custom Watch Later / History playlist id.

    playlist is 'watch_later' or 'history'; command is 'assign' or
    'unassign'. Prompts the user for confirmation and returns True when
    the stored id was updated, False when the user declined.
    """
    ui = context.get_ui()

    target_id = context.get_param(PLAYLIST_ID,
                                  ui.get_listitem_property(PLAYLIST_ID))
    if not target_id:
        raise KodionException('{type}/{command}: missing playlist_id'
                              .format(type=playlist, command=command))

    target_name = context.get_param('item_name',
                                    ui.get_listitem_info(TITLE))
    if not target_name:
        raise KodionException('{type}/{command}: missing item_name'
                              .format(type=playlist, command=command))

    prompt = context.localize(
        '{type}.list.{command}.check'.format(type=playlist, command=command),
        target_name,
    )
    if not ui.on_yes_no_input(context.get_name(), prompt):
        return False

    # Unassigning stores None; assigning stores the resolved playlist id.
    new_id = None if command == 'unassign' else target_id
    access_manager = context.get_access_manager()
    if playlist == 'watch_later':
        access_manager.set_watch_later_id(new_id)
    else:
        access_manager.set_watch_history_id(new_id)
    return True
|
||||
|
||||
|
||||
def _process_rate_playlist(provider,
                           context,
                           rating,
                           playlist_id=None,
                           playlist_name=None,
                           confirmed=None):
    """Rate ('like') or un-rate ('unlike') a playlist, i.e. save it to or
    remove it from the user's saved playlists.

    Returns True on success, False on failure, or None when the user
    declined the removal confirmation (no request was made).
    """
    ui = context.get_ui()
    container_uri = ui.get_container_info(FOLDER_URI)
    li_path = ui.get_listitem_info(URI)
    # Focused-listitem fallbacks for when params don't carry the details.
    li_playlist_id = ui.get_listitem_property(PLAYLIST_ID)
    li_playlist_name = ui.get_listitem_info(TITLE)

    params = context.get_params()
    if playlist_id is None:
        playlist_id = params.pop(PLAYLIST_ID, li_playlist_id)
    if playlist_name is None:
        playlist_name = params.pop('item_name', li_playlist_name)
    if confirmed is None:
        # Liking never needs confirmation; unliking may be pre-confirmed.
        confirmed = rating == 'like' or params.pop('confirmed', False)

    localize = context.localize

    if not playlist_id:
        # Last resort: parse the id out of the list item's URI.
        playlist_id = context.parse_item_ids(li_path).get(PLAYLIST_ID)
        if not playlist_id:
            raise KodionException('Playlist/Rate: missing playlist_id')

    client = provider.get_client(context)
    # NOTE(review): `ui` is already bound above; the extra
    # context.get_ui() call here looks redundant — confirm before changing.
    if (rating == 'like'
            or confirmed
            or context.get_ui().on_remove_content(playlist_name)):
        success = client.rate_playlist(playlist_id, rating)
    else:
        success = None

    if success:
        ui.show_notification(
            message=(localize('saved')
                     if rating == 'like' else
                     localize('removed.name.x', playlist_name)),
            time_ms=2500,
            audible=False,
        )

        if not container_uri:
            return True

        # Move focus on when triggered from a keymap or non-context-menu.
        if params.get(KEYMAP) or not params.get(CONTEXT_MENU):
            ui.set_focus_next_item()

        path, params = context.parse_uri(container_uri)
        if path.startswith(PATHS.SAVED_PLAYLISTS):
            if 'refresh' not in params:
                params['refresh'] = True
        else:
            # path becomes False (skip reroute) when already confirmed and
            # no explicit reload_path was supplied.
            path = params.pop('reload_path', False if confirmed else None)

        if path is not False:
            provider.reroute(
                context,
                path=path,
                params=params,
            )
        return True

    elif success is False:
        # NOTE(review): the 'unlike' failure message is the bare 'remove'
        # string rather than a ('failed.x', ...) composition as used
        # elsewhere — possibly a wrong localisation key; confirm.
        ui.show_notification(
            message=(localize(('failed.x', 'save'))
                     if rating == 'like' else
                     localize('remove')),
            time_ms=2500,
            audible=False,
        )
        return False

    # success is None: user declined the confirmation dialog.
    return None
|
||||
|
||||
|
||||
def process(provider,
            context,
            re_match=None,
            command=None,
            category=None,
            **kwargs):
    """Dispatch playlist-related plugin routes to their handlers.

    command/category fall back to the named groups of re_match when not
    supplied explicitly. Raises KodionException for any unrecognised
    category/command combination.
    """
    if re_match:
        if command is None:
            command = re_match.group('command')
        if category is None:
            category = re_match.group('category')

    if category == 'video':
        if command == 'add':
            return _process_add_video(provider, context)
        if command == 'remove':
            return _process_remove_video(provider, context, **kwargs)

    elif category == 'playlist':
        simple_handlers = {
            'remove': _process_remove_playlist,
            'select': _process_select_playlist,
            'rename': _process_rename_playlist,
        }
        handler = simple_handlers.get(command)
        if handler:
            return handler(provider, context)
        if command in ('like', 'unlike'):
            return _process_rate_playlist(provider, context, command)

    elif category in ('watch_later', 'history'):
        if command in ('assign', 'unassign'):
            return _playlist_id_change(context, category, command)

    raise KodionException('Unknown playlist category {0!r} or command {1!r}'
                          .format(category, command))
|
||||
@@ -0,0 +1,442 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
import os
|
||||
|
||||
from ...kodion.compatibility import urlencode, xbmcvfs
|
||||
from ...kodion.constants import ADDON_ID, DATA_PATH, WAIT_END_FLAG
|
||||
from ...kodion.network import get_listen_addresses, httpd_status
|
||||
from ...kodion.sql_store import PlaybackHistory, SearchHistory
|
||||
from ...kodion.utils.convert_format import to_unicode
|
||||
from ...kodion.utils.datetime import since_epoch, strptime
|
||||
|
||||
|
||||
def process_pre_run(context):
    """Clear all cached function results before the setup wizard runs."""
    function_cache = context.get_function_cache()
    function_cache.clear()
|
||||
|
||||
|
||||
def process_language(context, step, steps, **_kwargs):
    """Wizard step: offer to run the language/region configuration script.

    Returns the incremented step counter.
    """
    localize = context.localize
    ui = context.get_ui()

    step += 1
    heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
        youtube=localize('youtube'),
        setup_wizard=localize('setup_wizard'),
        step=step,
        steps=steps,
    )
    prompt = localize(('setup_wizard.prompt.x', 'setup_wizard.prompt.locale'))
    if ui.on_yes_no_input(heading, prompt):
        script = 'RunScript({addon_id},config/language_region)'.format(
            addon_id=ADDON_ID,
        )
        # Block until the script signals completion, then reload settings.
        context.execute(script, wait_for=WAIT_END_FLAG)
        context.get_settings(refresh=True)
    return step
|
||||
|
||||
|
||||
def process_geo_location(context, step, steps, **_kwargs):
    """Wizard step: offer to run the geo-location configuration script.

    Returns the incremented step counter.
    """
    localize = context.localize

    step += 1
    heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
        youtube=localize('youtube'),
        setup_wizard=localize('setup_wizard'),
        step=step,
        steps=steps,
    )
    prompt = localize(('setup_wizard.prompt.x',
                       'setup_wizard.prompt.my_location'))
    if context.get_ui().on_yes_no_input(heading, prompt):
        script = 'RunScript({addon_id},config/geo_location)'.format(
            addon_id=ADDON_ID,
        )
        # Block until the script signals completion, then reload settings.
        context.execute(script, wait_for=WAIT_END_FLAG)
        context.get_settings(refresh=True)
    return step
|
||||
|
||||
|
||||
def process_default_settings(context, step, steps, **_kwargs):
    """Wizard step: offer to apply the recommended default settings.

    Applies playback defaults (ISA/MPD, stream selection, live stream
    type, cache size) and then probes for a reachable local HTTP server
    address. Returns the incremented step counter.
    """
    localize = context.localize
    settings = context.get_settings()
    ui = context.get_ui()

    step += 1
    if ui.on_yes_no_input(
            '{youtube} - {setup_wizard} ({step}/{steps})'.format(
                youtube=localize('youtube'),
                setup_wizard=localize('setup_wizard'),
                step=step,
                steps=steps,
            ),
            localize(('setup_wizard.prompt.x',
                      'setup_wizard.prompt.settings.defaults')),
    ):
        settings.use_isa(True)
        settings.use_mpd_videos(True)
        # 4 = ask + adaptive, 3 = adaptive only (based on existing choice).
        settings.stream_select(4 if settings.ask_for_video_quality() else 3)
        settings.set_subtitle_download(False)
        # Kodi 21+ supports live stream type 2.
        if context.get_system_version().compatible(21):
            settings.live_stream_type(2)
        else:
            settings.live_stream_type(1)
        # Only disable alternative-player options when no custom player
        # core factory is configured.
        if not xbmcvfs.exists('special://profile/playercorefactory.xml'):
            settings.support_alternative_player(False)
            settings.default_player_web_urls(False)
            settings.alternative_player_web_urls(False)
            settings.alternative_player_mpd(False)
        if settings.cache_size() < 50:
            settings.cache_size(50)
        settings.httpd_sleep_allowed(True)
        with ui.create_progress_dialog(
                heading=localize('httpd'),
                message=localize('httpd.connect.wait'),
                total=1,
                background=False,
        ) as progress_dialog:
            progress_dialog.update()
            # Prefer loopback over wildcard bind, then probe alternatives
            # if the local HTTP server is not reachable.
            if settings.httpd_listen() == '0.0.0.0':
                settings.httpd_listen('127.0.0.1')
            if not httpd_status(context):
                port = settings.httpd_port()
                addresses = get_listen_addresses()
                progress_dialog.grow_total(delta=len(addresses))
                for address in addresses:
                    progress_dialog.update()
                    if httpd_status(context, (address, port)):
                        settings.httpd_listen(address)
                        break
                    context.sleep(5)
                else:
                    # No candidate address responded: warn and fall back
                    # to binding on all interfaces.
                    ui.show_notification(localize('httpd.connect.failed'),
                                         header=localize('httpd'))
                    settings.httpd_listen('0.0.0.0')
    return step
|
||||
|
||||
|
||||
def process_list_detail_settings(context, step, steps, **_kwargs):
    """Wizard step: choose between compact and detailed list labels.

    Answering yes disables detailed descriptions/labels; answering no
    enables them. Returns the incremented step counter.
    """
    localize = context.localize
    settings = context.get_settings()

    step += 1
    heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
        youtube=localize('youtube'),
        setup_wizard=localize('setup_wizard'),
        step=step,
        steps=steps,
    )
    prompt = localize(('setup_wizard.prompt.x',
                       'setup_wizard.prompt.settings.list_details'))
    wants_compact = context.get_ui().on_yes_no_input(heading, prompt)
    show_details = not wants_compact
    settings.show_detailed_description(show_details)
    settings.show_detailed_labels(show_details)
    return step
|
||||
|
||||
|
||||
def process_performance_settings(context, step, steps, **_kwargs):
    """Wizard step: tune quality/feature settings to a device preset.

    Presents a list of device capability presets and applies the chosen
    preset's maximum resolution, stream feature set and page size.
    Returns the incremented step counter.
    """
    localize = context.localize
    settings = context.get_settings()
    ui = context.get_ui()

    step += 1
    if ui.on_yes_no_input(
            '{youtube} - {setup_wizard} ({step}/{steps})'.format(
                youtube=localize('youtube'),
                setup_wizard=localize('setup_wizard'),
                step=step,
                steps=steps,
            ),
            localize(('setup_wizard.prompt.x',
                      'setup_wizard.prompt.settings.performance')),
    ):
        # Preset table: key -> max resolution index, supported stream
        # features, and items per page.
        device_types = {
            '720p30': {
                'max_resolution': 3,  # 720p
                'stream_features': ('avc1', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'mp4a', 'vtt', 'filter', 'alt_sort'),
                'num_items': 10,
            },
            '1080p30_avc': {
                'max_resolution': 4,  # 1080p
                'stream_features': ('avc1', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'vtt', 'filter', 'alt_sort'),
                'num_items': 10,
            },
            '1080p30': {
                'max_resolution': 4,  # 1080p
                'stream_features': ('avc1', 'vp9', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter', 'alt_sort'),
                'num_items': 20,
            },
            '1080p60': {
                'max_resolution': 4,  # 1080p
                'stream_features': ('avc1', 'vp9', 'hfr', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter'),
                'num_items': 30,
            },
            '4k30': {
                'max_resolution': 6,  # 4k
                'stream_features': ('avc1', 'vp9', 'hdr', 'hfr', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'no_hfr_max', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter'),
                'num_items': 50,
            },
            '4k60': {
                'max_resolution': 6,  # 4k
                'stream_features': ('avc1', 'vp9', 'hdr', 'hfr', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter'),
                'num_items': 50,
            },
            '4k60_av1': {
                'max_resolution': 6,  # 4k
                'stream_features': ('avc1', 'vp9', 'av01', 'hdr', 'hfr', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter'),
                'num_items': 50,
            },
            'max': {
                'max_resolution': 7,  # 8k
                'stream_features': ('avc1', 'vp9', 'av01', 'hdr', 'hfr', '3d', 'vr', 'prefer_dub', 'prefer_auto_dub', 'vorbis', 'mp4a', 'ssa', 'ac-3', 'ec-3', 'dts', 'vtt', 'filter'),
                'num_items': 50,
            },
        }
        # Build select-dialog entries; the trailing element is the preset
        # key returned by ui.on_select.
        items = [
            localize('setup_wizard.capabilities.' + item).split(' | ') + [item]
            for item in device_types
        ]
        device_type = ui.on_select(
            localize('setup_wizard.capabilities'),
            items=items,
            use_details=True,
        )
        if device_type == -1:
            # Selection cancelled: leave settings untouched.
            return step

        device_type = device_types[device_type]
        # NOTE(review): no preset above defines a 'settings' key, so this
        # branch appears to be a forward-compatibility hook — confirm.
        if 'settings' in device_type:
            for setting in device_type['settings']:
                setting[0](*setting[1])
        settings.mpd_video_qualities(device_type['max_resolution'])
        if not settings.use_mpd_videos():
            settings.fixed_video_quality(device_type['max_resolution'])
        settings.stream_features(device_type['stream_features'])
        settings.items_per_page(device_type['num_items'])
    return step
|
||||
|
||||
|
||||
def process_subtitles(context, step, steps, **_kwargs):
    """Wizard step: offer to run the subtitles configuration script.

    Returns the incremented step counter.
    """
    localize = context.localize

    step += 1
    heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
        youtube=localize('youtube'),
        setup_wizard=localize('setup_wizard'),
        step=step,
        steps=steps,
    )
    prompt = localize(('setup_wizard.prompt.x',
                       'setup_wizard.prompt.subtitles'))
    if context.get_ui().on_yes_no_input(heading, prompt):
        script = 'RunScript({addon_id},config/subtitles)'.format(
            addon_id=ADDON_ID,
        )
        # Block until the script signals completion, then reload settings.
        context.execute(script, wait_for=WAIT_END_FLAG)
        context.get_settings(refresh=True)
    return step
|
||||
|
||||
|
||||
def process_old_search_db(context, step, steps, **_kwargs):
    """Wizard step: import search history from a legacy database file.

    Offered only when the old kodion/search.sqlite file exists. After a
    successful import the old file is deleted via the maintenance script.
    Returns the incremented step counter.
    """
    localize = context.localize
    ui = context.get_ui()

    # Path components of the legacy database; joined for existence checks
    # and deletion, passed as a tuple to the SQL storage wrapper.
    search_db_path = (
        xbmcvfs.translatePath(DATA_PATH),
        'kodion',
        'search.sqlite'
    )
    search_db_path_str = os.path.join(*search_db_path)
    step += 1
    if xbmcvfs.exists(search_db_path_str) and ui.on_yes_no_input(
            '{youtube} - {setup_wizard} ({step}/{steps})'.format(
                youtube=localize('youtube'),
                setup_wizard=localize('setup_wizard'),
                step=step,
                steps=steps,
            ),
            localize('setup_wizard.prompt.import_search_history'),
    ):
        def _convert_old_search_item(value, item):
            # Map a legacy row to the new {'text', 'timestamp'} schema.
            return {
                'text': to_unicode(value),
                'timestamp': since_epoch(strptime(item[1])),
            }

        search_history = context.get_search_history()
        old_search_db = SearchHistory(
            search_db_path,
            migrate='storage',
        )
        items = old_search_db.get_items(process=_convert_old_search_item)
        for search in items:
            search_history.update_item(search['text'], search['timestamp'])

        ui.show_notification(localize('succeeded'))
        # Remove the legacy database file once the import has completed.
        context.execute(
            'RunScript({addon},maintenance/{action}?{query})'.format(
                addon=ADDON_ID,
                action='delete',
                query=urlencode({
                    'target': 'other_file',
                    'path': search_db_path_str,
                }),
            ),
            wait_for=WAIT_END_FLAG,
        )
    return step
|
||||
|
||||
|
||||
def process_old_history_db(context, step, steps, **_kwargs):
    """Wizard step: import playback history from a legacy database file.

    Offered only when the per-user playback/<user_id>.sqlite file exists.
    After a successful import the old file is deleted via the maintenance
    script. Returns the incremented step counter.
    """
    localize = context.localize
    ui = context.get_ui()

    # Path components of the legacy per-user database; joined for
    # existence checks and deletion, passed as a tuple to the wrapper.
    history_db_path = (
        xbmcvfs.translatePath(DATA_PATH),
        'playback',
        context.get_access_manager().get_current_user_id() + '.sqlite',
    )
    history_db_path_str = os.path.join(*history_db_path)
    step += 1
    if xbmcvfs.exists(history_db_path_str) and ui.on_yes_no_input(
            '{youtube} - {setup_wizard} ({step}/{steps})'.format(
                youtube=localize('youtube'),
                setup_wizard=localize('setup_wizard'),
                step=step,
                steps=steps,
            ),
            localize('setup_wizard.prompt.import_playback_history'),
    ):
        def _convert_old_history_item(value, item):
            # Legacy rows store a comma separated value string:
            # play_count,total_time,played_time,played_percent.
            values = value.split(',')
            return {
                'play_count': int(values[0]),
                'total_time': float(values[1]),
                'played_time': float(values[2]),
                'played_percent': int(values[3]),
                'timestamp': since_epoch(strptime(item[1])),
            }

        playback_history = context.get_playback_history()
        old_history_db = PlaybackHistory(
            history_db_path,
            migrate='storage',
        )
        items = old_history_db.get_items(process=_convert_old_history_item)
        for video_id, history in items.items():
            # timestamp is passed separately, not stored in the payload.
            timestamp = history.pop('timestamp', None)
            playback_history.update_item(video_id, history, timestamp)

        ui.show_notification(localize('succeeded'))
        # Remove the legacy database file once the import has completed.
        context.execute(
            'RunScript({addon},maintenance/{action}?{query})'.format(
                addon=ADDON_ID,
                action='delete',
                query=urlencode({
                    'target': 'other_file',
                    'path': history_db_path_str,
                }),
            ),
            wait_for=WAIT_END_FLAG,
        )
    return step
|
||||
|
||||
|
||||
def process_refresh_settings(context, step, steps, **_kwargs):
    """Wizard step: offer to regenerate the add-on's settings XML.

    Returns the incremented step counter.
    """
    localize = context.localize

    step += 1
    heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
        youtube=localize('youtube'),
        setup_wizard=localize('setup_wizard'),
        step=step,
        steps=steps,
    )
    if context.get_ui().on_yes_no_input(
            heading,
            localize('setup_wizard.prompt.settings.refresh'),
    ):
        script = 'RunScript({addon},maintenance/{action}?{query})'.format(
            addon=ADDON_ID,
            action='refresh',
            query='target=settings_xml',
        )
        # Block until the maintenance script signals completion.
        context.execute(script, wait_for=WAIT_END_FLAG)
    return step
|
||||
|
||||
|
||||
def process_migrate_watch_history(context, step, steps, **_kwargs):
    """Wizard step: offer to switch to the native YouTube watch history.

    Only shown when a custom history playlist id is configured. Returns
    the incremented step counter.
    """
    localize = context.localize
    access_manager = context.get_access_manager()
    history_id = access_manager.get_watch_history_id().upper()

    step += 1
    if history_id != 'HL':
        heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
            youtube=localize('youtube'),
            setup_wizard=localize('setup_wizard'),
            step=step,
            steps=steps,
        )
        if context.get_ui().on_yes_no_input(
                heading,
                localize('setup_wizard.prompt.migrate_watch_history'),
        ):
            access_manager.set_watch_history_id('HL')
            context.get_settings().use_remote_history(True)
    return step
|
||||
|
||||
|
||||
def process_migrate_watch_later(context, step, steps, **_kwargs):
    """Wizard step: offer to switch to the native Watch Later playlist.

    Only shown when a custom Watch Later playlist id is configured.
    Returns the incremented step counter.
    """
    localize = context.localize
    access_manager = context.get_access_manager()
    later_id = access_manager.get_watch_later_id().upper()

    step += 1
    if later_id != 'WL':
        heading = '{youtube} - {setup_wizard} ({step}/{steps})'.format(
            youtube=localize('youtube'),
            setup_wizard=localize('setup_wizard'),
            step=step,
            steps=steps,
        )
        if context.get_ui().on_yes_no_input(
                heading,
                localize('setup_wizard.prompt.migrate_watch_later'),
        ):
            access_manager.set_watch_later_id('WL')
    return step
|
||||
|
||||
|
||||
# Ordered list of setup wizard steps. Each entry is a callable with the
# signature step_func(context, step, steps, **kwargs) returning the
# incremented step counter, so the steps can be chained sequentially.
STEPS = [
    process_default_settings,
    process_performance_settings,
    process_language,
    process_subtitles,
    process_old_search_db,
    process_old_history_db,
    process_migrate_watch_history,
    process_migrate_watch_later,
    process_geo_location,
    process_list_detail_settings,
    process_refresh_settings,
]
|
||||
@@ -0,0 +1,705 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from functools import partial
|
||||
|
||||
from . import UrlResolver, UrlToItemConverter, utils, v3
|
||||
from ...kodion import KodionException, logging
|
||||
from ...kodion.constants import (
|
||||
CHANNEL_ID,
|
||||
CHANNEL_IDS,
|
||||
CONTENT,
|
||||
HIDE_FOLDERS,
|
||||
HIDE_LIVE,
|
||||
HIDE_SHORTS,
|
||||
HIDE_VIDEOS,
|
||||
INCOGNITO,
|
||||
PAGE,
|
||||
PATHS,
|
||||
PLAYLIST_ID,
|
||||
PLAYLIST_IDS,
|
||||
VIDEO_ID,
|
||||
)
|
||||
from ...kodion.items import DirectoryItem, UriItem
|
||||
from ...kodion.utils.convert_format import strip_html_from_text
|
||||
|
||||
|
||||
def _process_related_videos(provider, context, client):
    """Build the 'related videos' listing for a video, or the home feed
    related suggestions when no video_id param is present.

    Returns an (items, options) tuple, or (False, None) when the API
    returned no data.
    """
    function_cache = context.get_function_cache()
    refresh = context.refresh_requested()
    params = context.get_params()

    video_id = params.get(VIDEO_ID)
    if video_id:
        # Cache related-video responses for an hour per page token.
        json_data = function_cache.run(
            client.get_related_videos,
            function_cache.ONE_HOUR,
            _refresh=refresh,
            video_id=video_id,
            page_token=params.get('page_token', ''),
        )
        if not json_data:
            return False, None

        # Filler used by the pagination layer to fetch adjacent pages
        # (it supplies the page_token itself).
        filler = partial(
            function_cache.run,
            client.get_related_videos,
            function_cache.ONE_HOUR,
            _refresh=refresh,
            video_id=video_id,
        )
        json_data['_pre_filler'] = filler
        json_data['_post_filler'] = filler
        category_label = context.localize(
            'video.related.to.x',
            params.get('item_name') or context.localize('untitled'),
        )
    else:
        # No specific video: fall back to home-feed related content.
        json_data = function_cache.run(
            client.get_related_for_home,
            function_cache.ONE_HOUR,
            _refresh=refresh,
        )
        if not json_data:
            return False, None
        category_label = None

    result = v3.response_to_items(
        provider,
        context,
        json_data,
        allow_duplicates=False,
    )
    options = {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.VIDEO_CONTENT,
            'sub_type': None,
            'category_label': category_label,
        },
    }
    return result, options
|
||||
|
||||
|
||||
def _process_comments(provider, context, client):
    """Build a comment listing: top-level comments for a video_id param,
    or replies for a parent_id param.

    Returns an (items, options) tuple, or (False, None) when neither id
    is supplied or the API returned no data.
    """
    params = context.get_params()
    video_id = params.get(VIDEO_ID)
    parent_id = params.get('parent_id')
    if not video_id and not parent_id:
        return False, None

    # video_id takes precedence over parent_id when both are present.
    if video_id:
        json_data = client.get_parent_comments(
            video_id=video_id,
            page_token=params.get('page_token', ''),
        )
    elif parent_id:
        json_data = client.get_child_comments(
            parent_id=parent_id,
            page_token=context.get_param('page_token', ''),
        )
    else:
        json_data = None
    if not json_data:
        return False, None

    result = v3.response_to_items(provider, context, json_data)
    options = {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.LIST_CONTENT,
            'sub_type': CONTENT.COMMENTS,
            'category_label': params.get('item_name', video_id),
        },
    }
    return result, options
|
||||
|
||||
|
||||
def _process_recommendations(provider, context, client):
    """Build the recommendations listing from the what-to-watch browse
    feed using the TV client JSON paths.

    Returns an (items, options) tuple, or (False, None) when the API
    returned no data.
    """
    function_cache = context.get_function_cache()
    refresh = context.refresh_requested()
    params = context.get_params()

    browse_id = 'FEwhat_to_watch'
    browse_client = 'tv'
    browse_paths = client.JSON_PATHS['tv_shelf_horizontal']
    # Alternative client kept for reference:
    # browse_client = 'android_vr'
    # browse_paths = client.JSON_PATHS['vr_shelf']

    # Cache browse responses for an hour, keyed by the pagination params.
    json_data = function_cache.run(
        client.get_browse_items,
        function_cache.ONE_HOUR,
        _refresh=refresh,
        browse_id=browse_id,
        client=browse_client,
        do_auth=True,
        page_token=params.get('page_token'),
        click_tracking=params.get('click_tracking'),
        visitor=params.get('visitor'),
        json_path=browse_paths,
    )
    if not json_data:
        return False, None

    # Filler used by the pagination layer to fetch adjacent pages (it
    # supplies the pagination arguments itself).
    filler = partial(
        function_cache.run,
        client.get_browse_items,
        function_cache.ONE_HOUR,
        _refresh=refresh,
        browse_id=browse_id,
        client=browse_client,
        do_auth=True,
        json_path=browse_paths,
    )
    json_data['_pre_filler'] = filler
    json_data['_post_filler'] = filler

    result = v3.response_to_items(
        provider,
        context,
        json_data,
        allow_duplicates=False,
    )
    options = {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.VIDEO_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
    return result, options
|
||||
|
||||
|
||||
def _process_trending(provider, context, client):
    """Build the 'Trending' video listing.

    Returns an (items, options) tuple, or (False, None) when the API
    returned no data.
    """
    response = client.get_trending_videos(
        page_token=context.get_param('page_token'),
    )
    if not response:
        return False, None

    # Let the pagination layer lazily fetch subsequent pages.
    response['_post_filler'] = client.get_trending_videos

    listing = v3.response_to_items(provider, context, response)
    listing_options = {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.VIDEO_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
    return listing, listing_options
|
||||
|
||||
|
||||
def _process_browse_channels(provider, context, client):
    """List guide categories, or the contents of a single guide category.

    When a 'guide_id' parameter is present the channels of that category
    are listed; otherwise the (cached) top level category list is shown.

    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    guide_id = context.get_param('guide_id')
    if not guide_id:
        # The category list changes rarely, so cache it for a long time.
        cache = context.get_function_cache()
        response = cache.run(
            client.get_guide_categories,
            cache.ONE_MONTH,
            _refresh=context.refresh_requested(),
        )
    else:
        response = client.get_guide_category(guide_id)

    if not response:
        return False, None

    listing = v3.response_to_items(provider, context, response)
    return listing, {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.LIST_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
|
||||
|
||||
|
||||
def _process_disliked_videos(provider, context, client):
    """List the authenticated user's disliked videos.

    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    response = client.get_disliked_videos(
        page_token=context.get_param('page_token', '')
    )
    if not response:
        return False, None

    listing = v3.response_to_items(provider, context, response)
    content_options = {
        'content_type': CONTENT.VIDEO_CONTENT,
        'sub_type': None,
        'category_label': None,
    }
    return listing, {provider.CONTENT_TYPE: content_options}
|
||||
|
||||
|
||||
def _process_live_events(provider, context, client, event_type='live'):
    """List live, upcoming or completed live events.

    :param event_type: one of 'live', 'upcoming' or 'completed'
    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    # TODO: cache result
    params = context.get_params()

    # Upcoming events are ordered by date by default, everything else by
    # view count; the caller can override via the 'order' parameter.
    if event_type == 'upcoming':
        default_order = 'date'
    else:
        default_order = 'viewCount'

    # Completed events are limited to the most recent days.
    cutoff = {'days': 3} if event_type == 'completed' else None

    response = client.get_live_events(
        event_type=event_type,
        order=params.get('order', default_order),
        page_token=params.get('page_token', ''),
        location=params.get('location', False),
        after=cutoff,
    )
    if not response:
        return False, None

    listing = v3.response_to_items(provider, context, response)
    return listing, {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.VIDEO_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
|
||||
|
||||
|
||||
def _process_description_links(provider, context):
    """Show items derived from a video's description.

    Depending on the supplied parameters this either resolves the URLs
    found in a video's description text, or displays previously extracted
    channel or playlist ids as directory items.

    :param provider: plugin provider instance
    :param context: plugin invocation context
    :return: (result, options) tuple, or (False, None) on failure
    """
    params = context.get_params()
    incognito = params.get(INCOGNITO, False)
    addon_id = params.get('addon_id', '')

    def _extract_urls(video_id):
        # Resolve every URL found in the description of video_id and
        # convert the resolved URLs into displayable items.
        url_resolver = UrlResolver(context)

        with context.get_ui().create_progress_dialog(
                heading=context.localize('please_wait'), background=False
        ) as progress_dialog:
            resource_manager = provider.get_resource_manager(context)

            video_data = resource_manager.get_videos((video_id,))
            yt_item = video_data[video_id] if video_data else None
            if not yt_item or 'snippet' not in yt_item:
                # No usable metadata for this video - inform the user.
                context.get_ui().on_ok(
                    title=context.localize('video.description_links'),
                    text=context.localize('video.description_links.not_found')
                )
                return False, None
            snippet = yt_item['snippet']
            description = strip_html_from_text(snippet['description'])

            # URL extraction is deterministic for a given description
            # text, so the result can be cached.
            function_cache = context.get_function_cache()
            urls = function_cache.run(
                utils.extract_urls,
                function_cache.ONE_DAY,
                _refresh=context.refresh_requested(),
                text=description,
            )

            progress_dialog.set_total(len(urls))

            res_urls = []
            for url in urls:
                progress_dialog.update(steps=1, text=url)
                resolved_url = url_resolver.resolve(url)
                res_urls.append(resolved_url)

                # Stop resolving early if the user cancelled the dialog;
                # whatever was resolved so far is still shown.
                if progress_dialog.is_aborted():
                    logging.debug('Resolving urls aborted')
                    break

            url_to_item_converter = UrlToItemConverter()
            url_to_item_converter.process_urls(res_urls, context)
            result = url_to_item_converter.get_items(provider, context)

        if not result:
            context.get_ui().on_ok(
                title=context.localize('video.description_links'),
                text=context.localize('video.description_links.not_found')
            )
            return False, None

        options = {
            provider.CONTENT_TYPE: {
                'content_type': CONTENT.VIDEO_CONTENT,
                'sub_type': None,
                'category_label': None,
            },
        }
        return result, options

    def _display_channels(channel_ids):
        # Build placeholder directory items for each channel id; names and
        # artwork are filled in by utils.update_channel_items below.
        item_params = {}
        if incognito:
            item_params[INCOGNITO] = incognito
        if addon_id:
            item_params['addon_id'] = addon_id

        channel_id_dict = {}
        for channel_id in channel_ids:
            channel_item = DirectoryItem(
                name='',
                uri=context.create_uri(
                    (PATHS.CHANNEL, channel_id,),
                    item_params,
                ),
                channel_id=channel_id,
            )
            channel_items = channel_id_dict.setdefault(channel_id, [])
            channel_items.append(channel_item)

        utils.update_channel_items(provider, context, channel_id_dict)

        # clean up - remove empty entries
        # (items that were not updated still have an empty name)
        result = [channel_item
                  for channel_items in channel_id_dict.values()
                  for channel_item in channel_items
                  if channel_item.get_name()]
        if not result:
            return False, None

        options = {
            provider.CONTENT_TYPE: {
                'content_type': CONTENT.LIST_CONTENT,
                'sub_type': None,
                'category_label': context.localize(
                    'video.description_links.from.x',
                    params.get('item_name') or context.localize('untitled'),
                ),
            },
        }
        return result, options

    def _display_playlists(playlist_ids):
        # Build placeholder directory items for each playlist id; names and
        # artwork are filled in by utils.update_playlist_items below.
        item_params = {}
        if incognito:
            item_params[INCOGNITO] = incognito
        if addon_id:
            item_params['addon_id'] = addon_id

        playlist_id_dict = {}
        for playlist_id in playlist_ids:
            playlist_item = DirectoryItem(
                name='',
                uri=context.create_uri(
                    (PATHS.PLAYLIST, playlist_id,),
                    item_params,
                ),
                playlist_id=playlist_id,
            )
            playlist_items = playlist_id_dict.setdefault(playlist_id, [])
            playlist_items.append(playlist_item)

        channel_items_dict = {}
        utils.update_playlist_items(provider,
                                    context,
                                    playlist_id_dict,
                                    channel_items_dict=channel_items_dict)
        utils.update_channel_info(provider, context, channel_items_dict)

        # clean up - remove empty entries
        # (items that were not updated still have an empty name)
        result = [playlist_item
                  for playlist_items in playlist_id_dict.values()
                  for playlist_item in playlist_items
                  if playlist_item.get_name()]
        if not result:
            return False, None

        options = {
            provider.CONTENT_TYPE: {
                'content_type': CONTENT.VIDEO_CONTENT,
                'sub_type': None,
                'category_label': None,
            },
        }
        return result, options

    # Dispatch on whichever id parameter was supplied.
    video_id = params.get(VIDEO_ID)
    if video_id:
        return _extract_urls(video_id)

    channel_ids = params.get(CHANNEL_IDS)
    if channel_ids:
        return _display_channels(channel_ids)

    playlist_ids = params.get(PLAYLIST_IDS)
    if playlist_ids:
        return _display_playlists(playlist_ids)

    logging.error('Missing video_id or playlist_ids for description links')
    return False, None
|
||||
|
||||
|
||||
def _process_saved_playlists(provider, context, client):
    """List the playlists the user has saved to their library.

    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    params = context.get_params()

    # Skip the user's own channel so only saved (foreign) playlists show.
    own_channel = client.channel_id
    if own_channel:
        own_channel = (own_channel,)

    # Keyword arguments shared between the initial request and the filler
    # used for subsequent pages.
    browse_kwargs = {
        'browse_id': 'FEplaylist_aggregation',
        'client': 'tv',
        'skip_ids': own_channel,
        'response_type': 'playlists',
        'do_auth': True,
        'json_path': client.JSON_PATHS['tv_grid'],
    }

    response = client.get_browse_items(
        page_token=params.get('page_token'),
        click_tracking=params.get('click_tracking'),
        visitor=params.get('visitor'),
        **browse_kwargs
    )
    if not response:
        return False, None

    # Filler transparently fetches additional pages with the same request.
    filler = partial(client.get_browse_items, **browse_kwargs)
    response['_pre_filler'] = filler
    response['_post_filler'] = filler

    listing = v3.response_to_items(
        provider,
        context,
        response,
        allow_duplicates=False,
    )
    return listing, {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.LIST_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
|
||||
|
||||
|
||||
def _process_my_subscriptions(provider,
                              context,
                              client,
                              filtered=False,
                              feed_type=None,
                              _feed_types=frozenset((
                                  'videos', 'shorts', 'live'
                              ))):
    """List the combined feed of the user's channel subscriptions.

    :param provider: plugin provider instance
    :param context: plugin invocation context
    :param client: YouTube client used to fetch the feeds
    :param filtered: whether to apply the user's subscription filter
    :param feed_type: 'videos', 'shorts' or 'live'; anything else falls
        back to 'videos'
    :param _feed_types: internal constant - the set of valid feed types
    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    refresh = context.refresh_requested()

    if feed_type not in _feed_types:
        feed_type = 'videos'

    with context.get_ui().create_progress_dialog(
            heading=context.localize('my_subscriptions.loading'),
            message=context.localize('subscriptions'),
            background=True,
    ) as progress_dialog:
        json_data = client.get_my_subscriptions(
            page_token=context.get_param('page', 1),
            do_filter=filtered,
            feed_type=feed_type,
            refresh=refresh,
            # force_cache only when refresh is falsy but not the literal
            # False/None values - presumably a distinct sentinel state of
            # refresh_requested(); TODO confirm against its implementation
            force_cache=(not refresh
                         and refresh is not False
                         and refresh is not None),
            progress_dialog=progress_dialog,
        )
        if not json_data:
            return False, None

        # Filler used to fetch subsequent pages of the feed.
        filler = partial(
            client.get_my_subscriptions,
            do_filter=filtered,
            feed_type=feed_type,
            refresh=refresh,
            force_cache=True,
            progress_dialog=progress_dialog,
        )
        json_data['_post_filler'] = filler

        if filtered:
            my_subscriptions_path = PATHS.MY_SUBSCRIPTIONS_FILTERED
        else:
            my_subscriptions_path = PATHS.MY_SUBSCRIPTIONS

        # On the first page (unless folders are hidden) prepend shortcut
        # folders for the feed types other than the one being shown.
        params = context.get_params()
        if params.get(PAGE, 1) == 1 and not params.get(HIDE_FOLDERS):
            v3_response = {
                'kind': 'plugin#pluginListResponse',
                'items': [
                    None
                    if feed_type == 'videos' or params.get(HIDE_VIDEOS) else
                    {
                        'kind': 'plugin#videosFolder',
                        '_params': {
                            'name': context.localize('my_subscriptions'),
                            'uri': context.create_uri(my_subscriptions_path),
                            'image': '{media}/new_uploads.png',
                        },
                    },
                    None
                    if feed_type == 'shorts' or params.get(HIDE_SHORTS) else
                    {
                        'kind': 'plugin#shortsFolder',
                        '_params': {
                            'name': context.localize('shorts'),
                            'uri': context.create_uri(
                                (my_subscriptions_path, 'shorts')
                            ),
                            'image': '{media}/shorts.png',
                        },
                    },
                    None
                    if feed_type == 'live' or params.get(HIDE_LIVE) else
                    {
                        'kind': 'plugin#liveFolder',
                        '_params': {
                            'name': context.localize('live'),
                            'uri': context.create_uri(
                                (my_subscriptions_path, 'live')
                            ),
                            'image': '{media}/live.png',
                        },
                    },
                ],
            }
            result = v3.response_to_items(provider, context, v3_response)
        else:
            result = []

        options = {
            provider.CONTENT_TYPE: {
                'content_type': CONTENT.VIDEO_CONTENT,
                'sub_type': None,
                'category_label': None,
            },
        }
        # Append the actual feed items after the shortcut folders,
        # filtering by item type depending on the feed being shown.
        result.extend(v3.response_to_items(
            provider, context, json_data,
            item_filter={
                'live_folder': True,
                'shorts': True,
            } if feed_type == 'live' else {
                'live_folder': True,
                'shorts': True,
                'vod': True,
            },
        ))
        return result, options
|
||||
|
||||
|
||||
def _process_virtual_list(provider, context, _client, playlist_id=None):
    """List a virtual/system playlist (e.g. watch history 'HL').

    :param playlist_id: playlist id; falls back to the PLAYLIST_ID
        parameter when not supplied
    :return: (result, options) tuple, or (False, None) when no data was
        retrieved
    """
    params = context.get_params()

    list_id = playlist_id or params.get(PLAYLIST_ID)
    if not list_id:
        return False, None
    list_id = list_id.upper()

    context.parse_params({
        CHANNEL_ID: 'mine',
        PLAYLIST_ID: list_id,
    })

    resource_manager = provider.get_resource_manager(context)
    response = resource_manager.get_playlist_items(
        batch_id=(list_id, 0),
        page_token=params.get('page_token'),
    )
    if not response:
        return False, None

    # Filler transparently fetches further batches of the playlist.
    filler = partial(
        resource_manager.get_playlist_items,
        batch_id=(list_id, 0),
    )
    response['_pre_filler'] = filler
    response['_post_filler'] = filler

    listing = v3.response_to_items(
        provider,
        context,
        response,
        allow_duplicates=False,
    )
    # 'HL' is the watch history list and gets its own sub type.
    sub_type = CONTENT.HISTORY if list_id == 'HL' else None
    return listing, {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.VIDEO_CONTENT,
            'sub_type': sub_type,
            'category_label': None,
        },
    }
|
||||
|
||||
|
||||
def process(provider, context, re_match=None, category=None, sub_category=None):
    """Dispatch a 'special' category path to its handler.

    :param provider: plugin provider instance
    :param context: plugin invocation context
    :param re_match: optional regex match supplying the 'category' and
        'sub_category' groups when they are not passed explicitly
    :param category: special category name
    :param sub_category: optional sub category (e.g. feed type)
    :return: whatever the matched handler returns, typically a
        (result, options) tuple or a UriItem redirect
    :raises KodionException: for unknown categories
    """
    if re_match:
        if category is None:
            category = re_match.group('category')
        if sub_category is None:
            sub_category = re_match.group('sub_category')

    client = provider.get_client(context)

    if category == 'related_videos':
        return _process_related_videos(provider, context, client)

    if category == 'popular_right_now':
        return _process_trending(provider, context, client)

    if category == 'recommendations':
        return _process_recommendations(provider, context, client)

    if category == 'browse_channels':
        return _process_browse_channels(provider, context, client)

    # Matches both the plain and '_filtered' variants, and both feed
    # naming schemes.
    if category.startswith(('my_subscriptions', 'new_uploaded_videos_tv')):
        return _process_my_subscriptions(
            provider,
            context,
            client,
            filtered=category.endswith('_filtered'),
            feed_type=sub_category,
        )

    if category == 'disliked_videos':
        # Requires authentication; redirect to sign-in otherwise.
        if client.logged_in:
            return _process_disliked_videos(provider, context, client)
        return UriItem(context.create_uri(('sign', 'in')))

    if category == 'live':
        return _process_live_events(
            provider, context, client, event_type='live'
        )

    if category == 'upcoming_live':
        return _process_live_events(
            provider, context, client, event_type='upcoming'
        )

    if category == 'completed_live':
        return _process_live_events(
            provider, context, client, event_type='completed'
        )

    if category == 'description_links':
        return _process_description_links(provider, context)

    if category.endswith('_comments'):
        return _process_comments(provider, context, client)

    if category == 'saved_playlists':
        return _process_saved_playlists(provider, context, client)

    if category == 'playlist':
        return _process_virtual_list(provider, context, client, sub_category)

    raise KodionException('YouTube special category "%s" not found' % category)
|
||||
@@ -0,0 +1,110 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from ..helper import v3
|
||||
from ...kodion import KodionException
|
||||
from ...kodion.constants import CHANNEL_ID, CONTENT, SUBSCRIPTION_ID
|
||||
from ...kodion.items import UriItem
|
||||
|
||||
|
||||
def _process_list(provider, context, client):
    """List the authenticated user's channel subscriptions.

    :param provider: plugin provider instance
    :param context: plugin invocation context
    :param client: YouTube client used to fetch the subscriptions
    :return: (result, options) tuple, or (False, None) when the
        subscription list could not be retrieved
    """
    json_data = client.get_subscription(
        'mine', page_token=context.get_param('page_token', '')
    )
    if not json_data:
        # Was `return []`, which broke the (result, options) contract that
        # every other handler in this module follows; return the common
        # failure sentinel instead so callers can always unpack a pair.
        return False, None

    result = v3.response_to_items(provider, context, json_data)
    options = {
        provider.CONTENT_TYPE: {
            'content_type': CONTENT.LIST_CONTENT,
            'sub_type': None,
            'category_label': None,
        },
    }
    return result, options
|
||||
|
||||
|
||||
def _process_add(_provider, context, client):
    """Subscribe to a channel and notify the user on success.

    The channel id is taken from the SUBSCRIPTION_ID parameter, falling
    back to the focused list item's property when that looks like a
    channel id (starts with 'UC').

    :return: True on success, False otherwise
    """
    ui = context.get_ui()

    subscription_id = context.get_param(SUBSCRIPTION_ID)
    if not subscription_id:
        candidate = ui.get_listitem_property(SUBSCRIPTION_ID)
        # Only accept list item values that look like channel ids.
        if candidate and candidate.lower().startswith('uc'):
            subscription_id = candidate
        else:
            return False

    if not client.subscribe(subscription_id):
        return False

    ui.show_notification(
        context.localize('subscribed.to.channel'),
        time_ms=2500,
        audible=False,
    )
    return True
|
||||
|
||||
|
||||
def _process_remove(provider, context, client):
    """Unsubscribe from a channel and notify the user on success.

    Prefers a subscription id (request parameter, then list item
    property); falls back to a channel id resolved the same way.

    :return: (success, options) tuple; options request a listing refresh
        on success
    """
    ui = context.get_ui()

    subscription_id = (context.get_param(SUBSCRIPTION_ID)
                       or ui.get_listitem_property(SUBSCRIPTION_ID))
    channel_id = (context.get_param(CHANNEL_ID)
                  or ui.get_listitem_property(CHANNEL_ID))

    if subscription_id:
        success = client.unsubscribe(subscription_id)
    elif channel_id:
        success = client.unsubscribe_channel(channel_id)
    else:
        success = False

    if not success:
        return False, None

    ui.show_notification(
        context.localize('unsubscribed.from.channel'),
        time_ms=2500,
        audible=False,
    )
    # Refresh the listing so the removed channel disappears immediately.
    return True, {provider.FORCE_REFRESH: True}
|
||||
|
||||
|
||||
def process(provider, context, re_match):
    """Dispatch a 'subscriptions/<command>/' route to its handler.

    :raises KodionException: for unknown commands
    """
    command = re_match.group('command')

    # we need a login
    client = provider.get_client(context)
    if not client.logged_in:
        return UriItem(context.create_uri(('sign', 'in')))

    handlers = {
        'list': _process_list,
        'add': _process_add,
        'remove': _process_remove,
    }
    handler = handlers.get(command)
    if handler:
        return handler(provider, context, client)

    raise KodionException('Unknown subscriptions command: %s' % command)
|
||||
@@ -0,0 +1,139 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from ...kodion import KodionException
|
||||
from ...kodion.constants import URI, VIDEO_ID
|
||||
from ...kodion.items import menu_items
|
||||
|
||||
|
||||
def _process_rate_video(provider,
                        context,
                        re_match=None,
                        video_id=None,
                        current_rating=None,
                        _ratings=('like', 'dislike', 'none')):
    """Rate a video (like / dislike / remove rating).

    The video id is resolved from, in order: the explicit argument, the
    VIDEO_ID parameter, the regex match, then the focused list item's
    URI. The desired rating comes from the 'rating' parameter; without
    one a selection dialog is shown (excluding the current rating).

    :param provider: plugin provider instance
    :param context: plugin invocation context
    :param re_match: optional regex match that may supply 'video_id' and
        'rating' groups
    :param video_id: optional explicit video id
    :param current_rating: optional known current rating, to skip the
        rating lookup
    :param _ratings: internal constant - the valid rating values
    :return: (True, options) on completion, or (False, options) when the
        current rating could not be determined
    :raises KodionException: when no video id could be resolved
    """
    ui = context.get_ui()
    li_path = ui.get_listitem_info(URI)

    localize = context.localize

    # Sanitise the requested rating; invalid values behave as if no
    # rating was requested (a selection dialog is shown instead).
    rating_param = context.get_param('rating', '')
    if rating_param:
        rating_param = rating_param.lower()
        if rating_param not in _ratings:
            rating_param = ''

    if video_id is None:
        video_id = context.get_param(VIDEO_ID)
    if not video_id:
        try:
            # NOTE(review): raises AttributeError if re_match is None -
            # presumably callers always pass re_match on this path; verify
            video_id = re_match.group(VIDEO_ID)
        except IndexError:
            pass
    if not video_id and li_path:
        # Fall back to parsing the focused list item's URI.
        video_id = context.parse_item_ids(li_path).get(VIDEO_ID)
    if not video_id:
        raise KodionException('video/rate/: missing video_id')

    if current_rating is None:
        try:
            current_rating = re_match.group('rating')
        except IndexError:
            current_rating = None
    if not current_rating:
        # Look up the user's current rating so it can be excluded from
        # the selection dialog / compared against the requested rating.
        client = provider.get_client(context)
        json_data = client.get_video_rating(video_id)
        if not json_data:
            return False, {provider.FALLBACK: False}

        items = json_data.get('items', [])
        if items:
            current_rating = items[0].get('rating', '')

    if not rating_param:
        # No (valid) rating requested: let the user pick one.
        result = ui.on_select(localize('video.rate'), [
            (localize('video.rate.%s' % rating), rating)
            for rating in _ratings
            if rating != current_rating
        ])
    elif rating_param != current_rating:
        result = rating_param
    else:
        # Requested rating already set - nothing to do.
        result = -1

    notify_message = None
    response = None
    if result != -1:
        response = provider.get_client(context).rate_video(video_id, result)
        if response:
            if result == 'none':
                notify_message = localize(('removed.x', 'rating'))
            elif result == 'like':
                notify_message = localize('liked.video')
            elif result == 'dislike':
                notify_message = localize('disliked.video')
        else:
            notify_message = localize('failed')

    if notify_message:
        ui.show_notification(
            message=notify_message,
            time_ms=2500,
            audible=False,
        )

    return (
        True,
        {
            # this will be set if we are in the 'Liked Video' playlist
            provider.FORCE_REFRESH: response and context.refresh_requested(),
        },
    )
|
||||
|
||||
|
||||
def _process_more_for_video(context):
    """Show the 'more options' dialog for a video and run the selection.

    :raises KodionException: when no video id is supplied
    """
    params = context.get_params()

    video_id = params.get(VIDEO_ID)
    if not video_id:
        raise KodionException('video/more/: missing video_id')

    item_name = params.get('item_name')

    # Options available to every user.
    items = [
        menu_items.video_related(context, video_id, item_name),
        menu_items.video_comments(context, video_id, item_name),
        menu_items.video_description_links(context, video_id, item_name),
    ]
    if params.get('logged_in'):
        # Signed-in users can additionally manage playlists and rate.
        items.insert(0, menu_items.playlist_add_to_selected(context, video_id))
        items.append(menu_items.video_rate(context, video_id))

    choice = context.get_ui().on_select(context.localize('video.more'), items)
    if choice != -1:
        context.execute(choice)
|
||||
|
||||
|
||||
def process(provider, context, re_match=None, command=None, **kwargs):
    """Dispatch a 'video/<command>/' route to its handler.

    :raises KodionException: for unknown commands
    """
    if command is None and re_match:
        command = re_match.group('command')

    if command == 'rate':
        return _process_rate_video(provider, context, re_match, **kwargs)

    if command == 'more':
        return _process_more_for_video(context)

    raise KodionException('Unknown video command: %s' % command)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,30 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
|
||||
Copyright (C) 2016-2025 plugin.video.youtube
|
||||
|
||||
SPDX-License-Identifier: GPL-2.0-only
|
||||
See LICENSES/GPL-2.0-only for more information.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, unicode_literals
|
||||
|
||||
from ..kodion import KodionException
|
||||
from ..kodion.network import InvalidJSONError
|
||||
|
||||
|
||||
class LoginException(KodionException):
    """Raised when a sign-in/authentication request fails."""
    pass
|
||||
|
||||
|
||||
class YouTubeException(KodionException):
    """Generic error raised by the YouTube provider."""
    pass
|
||||
|
||||
|
||||
class InvalidGrant(KodionException):
    """Raised for rejected OAuth grants during login/token refresh.

    NOTE(review): named after the OAuth 'invalid_grant' error; used by the
    login client alongside LoginException - confirm exact trigger there.
    """
    pass
|
||||
|
||||
|
||||
class InvalidJSON(KodionException, InvalidJSONError):
    """JSON parsing error, catchable as either a KodionException or the
    network layer's InvalidJSONError."""
    pass
|
||||
Reference in New Issue
Block a user