-
This commit is contained in:
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
@@ -0,0 +1,17 @@
|
||||
|
||||
def get_imdb_id(uniqueids):
    """Return the IMDb ID ('tt…') from a *uniqueids* mapping, or None."""
    candidate = uniqueids.get('imdb')
    if candidate and candidate.startswith('tt'):
        return candidate
    return None
|
||||
|
||||
# example format for scraper results
# NOTE(review): documents the top-level keys a scraper result dict may
# contain — presumably consumed by the add-on entry point; confirm there.
_ScraperResults = {
    'info',
    'ratings',
    'uniqueids',
    'cast',
    'available_art',
    'error',
    'warning' # not handled
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
@@ -0,0 +1,88 @@
|
||||
# coding: utf-8
|
||||
#
|
||||
# Copyright (C) 2020, Team Kodi
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""Functions to interact with various web site APIs."""
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
import xbmc
|
||||
except ModuleNotFoundError:
|
||||
# only used for logging HTTP calls, not available nor needed for testing
|
||||
xbmc = None
|
||||
|
||||
# from pprint import pformat
|
||||
try: #PY2 / PY3
|
||||
from urllib2 import Request, urlopen
|
||||
from urllib2 import URLError
|
||||
from urllib import urlencode
|
||||
except ImportError:
|
||||
from urllib.request import Request, urlopen
|
||||
from urllib.error import URLError
|
||||
from urllib.parse import urlencode
|
||||
try:
|
||||
from typing import Text, Optional, Union, List, Dict, Any # pylint: disable=unused-import
|
||||
InfoType = Dict[Text, Any] # pylint: disable=invalid-name
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Module-wide HTTP headers applied to every request made by load_info().
HEADERS = {}


def set_headers(headers):
    """Replace the module-wide HTTP headers with *headers*."""
    for key in list(HEADERS):
        del HEADERS[key]
    HEADERS.update(headers)
|
||||
|
||||
|
||||
def load_info(url, params=None, default=None, resp_type = 'json'):
    # type: (Text, Optional[Dict[Text, Union[Text, List[Text]]]]) -> Union[dict, list]
    """
    Load info from external api

    :param url: API endpoint URL
    :param params: URL query params
    :default: object to return if there is an error
    :resp_type: what to return to the calling function
    :return: API response or default on error
    """
    if params:
        url = url + '?' + urlencode(params)
    if xbmc:
        xbmc.log('Calling URL "{}"'.format(url), xbmc.LOGDEBUG)
        if HEADERS:
            xbmc.log(str(HEADERS), xbmc.LOGDEBUG)
    req = Request(url, headers=HEADERS)
    try:
        response = urlopen(req)
    except URLError as e:
        # HTTPError is a URLError subclass that carries BOTH .code and
        # .reason, while a plain URLError only has .reason — so the .code
        # case must be tested first (the original elif was unreachable).
        if hasattr(e, 'code'):
            theerror = {'error': 'remote site unable to fulfill the request\nError code: {}'.format(e.code)}
        else:
            theerror = {'error': 'failed to reach the remote site\nReason: {}'.format(e.reason)}
        if default is not None:
            return default
        return theerror
    # read then close the response to avoid leaking the connection
    try:
        raw = response.read().decode('utf-8')
    finally:
        response.close()
    if resp_type.lower() == 'json':
        return json.loads(raw)
    return raw
|
||||
@@ -0,0 +1,93 @@
|
||||
from . import api_utils
|
||||
try:
|
||||
from urllib import quote
|
||||
except ImportError: # py2 / py3
|
||||
from urllib.parse import quote
|
||||
|
||||
# fanart.tv project API key identifying this scraper to the service
API_KEY = '384afe262ee0962545a752ff340e3ce4'
# Movie endpoint; formatted with a TMDb (or IMDb) movie ID
API_URL = 'https://webservice.fanart.tv/v3/movies/{}'

# Maps fanart.tv art-type names to Kodi art-type names
ARTMAP = {
    'movielogo': 'clearlogo',
    'hdmovielogo': 'clearlogo',
    'hdmovieclearart': 'clearart',
    'movieart': 'clearart',
    'moviedisc': 'discart',
    'moviebanner': 'banner',
    'moviethumb': 'landscape',
    'moviebackground': 'fanart',
    'movieposter': 'poster'
}

# Static HTTP headers sent with every fanart.tv request
HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('api-key', API_KEY),
)
|
||||
|
||||
|
||||
def get_details(uniqueids, clientkey, language, set_tmdbid):
    """Fetch fanart.tv artwork for a movie and, optionally, its movie set.

    Set artwork keys are prefixed with 'set.'. Returns {} when nothing
    could be retrieved, otherwise {'available_art': {...}}.
    """
    media_id = _get_mediaid(uniqueids)
    if not media_id:
        return {}

    movie_data = _get_data(media_id, clientkey)
    movieset_data = _get_data(set_tmdbid, clientkey) if set_tmdbid else None
    if not movie_data and not movieset_data:
        return {}

    available_art = _parse_data(movie_data, language) if movie_data else {}
    if movieset_data:
        set_art = _parse_data(movieset_data, language)
        available_art.update(
            ('set.' + arttype, images) for arttype, images in set_art.items())

    return {'available_art': available_art}
|
||||
|
||||
def _get_mediaid(uniqueids):
|
||||
for source in ('tmdb', 'imdb', 'unknown'):
|
||||
if source in uniqueids:
|
||||
return uniqueids[source]
|
||||
|
||||
def _get_data(media_id, clientkey):
    """Query the fanart.tv movie endpoint for *media_id*; {} on error."""
    request_headers = dict(HEADERS)
    if clientkey:
        # a personal client key is sent in addition to the project key
        request_headers['client-key'] = clientkey
    api_utils.set_headers(request_headers)
    return api_utils.load_info(API_URL.format(media_id), default={})
|
||||
|
||||
def _parse_data(data, language, language_fallback='en'):
    """Map raw fanart.tv art lists onto Kodi art types, filtered by language.

    Images are kept when textless, in *language*, or in the fallback
    language. Textless posters are reclassified as 'keyart'.
    """
    result = {}
    for fanarttv_type, images in data.items():
        kodi_base = ARTMAP.get(fanarttv_type)
        if kodi_base is None:
            continue
        for image in images:
            image_lang = _get_imagelanguage(fanarttv_type, image)
            if image_lang and image_lang not in (language, language_fallback):
                continue

            kodi_type = kodi_base
            if kodi_type == 'poster' and not image_lang:
                # a poster without text doubles as keyart
                kodi_type = 'keyart'
            result.setdefault(kodi_type, [])

            url = quote(image['url'], safe="%/:=&?~#+!$,;'@()*[]")
            result[kodi_type].append({
                'url': url,
                'preview': url.replace('.fanart.tv/fanart/', '.fanart.tv/preview/'),
                'lang': image_lang
            })

    return result
|
||||
|
||||
def _get_imagelanguage(arttype, image):
|
||||
if 'lang' not in image or arttype == 'moviebackground':
|
||||
return None
|
||||
if arttype in ('movielogo', 'hdmovielogo', 'hdmovieclearart', 'movieart', 'moviebanner',
|
||||
'moviethumb', 'moviedisc'):
|
||||
return image['lang'] if image['lang'] not in ('', '00') else 'en'
|
||||
# movieposter may or may not have a title and thus need a language
|
||||
return image['lang'] if image['lang'] not in ('', '00') else None
|
||||
+111
@@ -0,0 +1,111 @@
|
||||
# -*- coding: UTF-8 -*-
|
||||
#
|
||||
# Copyright (C) 2020, Team Kodi
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
# IMDb ratings based on code in metadata.themoviedb.org.python by Team Kodi
|
||||
# pylint: disable=missing-docstring
|
||||
|
||||
import json
|
||||
import re
|
||||
from . import api_utils
|
||||
from . import get_imdb_id
|
||||
|
||||
# IMDb title page; formatted with a 'tt…' IMDb ID
IMDB_RATINGS_URL = 'https://www.imdb.com/title/{}/'
# ld+json metadata block embedded in the current page layout
IMDB_LDJSON_REGEX = re.compile(r'<script type="application/ld\+json">(.*?)</script>', re.DOTALL)
IMDB_TOP250_REGEX = re.compile(r'Top rated movie #(\d+)')

# previous IMDB page design before June 2021
IMDB_RATING_REGEX_PREVIOUS = re.compile(r'itemprop="ratingValue".*?>.*?([\d.]+).*?<')
IMDB_VOTES_REGEX_PREVIOUS = re.compile(r'itemprop="ratingCount".*?>.*?([\d,]+).*?<')
IMDB_TOP250_REGEX_PREVIOUS = re.compile(r'Top Rated Movies #(\d+)')

# NOTE(review): browser-style User-Agent — presumably so IMDb serves the
# normal page to the scraper; confirm before changing.
HEADERS = (
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'),
    ('Accept', 'application/json'),
)
|
||||
|
||||
def get_details(uniqueids):
    """Scrape IMDb rating, votes and Top-250 rank for the given movie.

    Returns a scraper result dict ({} when there is no usable IMDb ID).
    """
    imdb_id = get_imdb_id(uniqueids)
    if not imdb_id:
        return {}
    votes, rating, top250 = _get_ratinginfo(imdb_id)
    return _assemble_imdb_result(votes, rating, top250)
|
||||
|
||||
def _get_ratinginfo(imdb_id):
    """Download the IMDb title page and parse rating details out of it."""
    api_utils.set_headers(dict(HEADERS))
    page = api_utils.load_info(
        IMDB_RATINGS_URL.format(imdb_id), default='', resp_type='text')
    return _parse_imdb_result(page)
|
||||
|
||||
def _assemble_imdb_result(votes, rating, top250):
|
||||
result = {}
|
||||
if top250:
|
||||
result['info'] = {'top250': top250}
|
||||
if votes and rating:
|
||||
result['ratings'] = {'imdb': {'votes': votes, 'rating': rating}}
|
||||
return result
|
||||
|
||||
def _parse_imdb_result(input_html):
    """Extract (votes, rating, top250) from an IMDb title page.

    Falls back to the pre-June-2021 page parsers when the current-layout
    parsers find nothing.
    """
    rating, votes = _parse_imdb_rating_and_votes(input_html)
    if rating is None or votes is None:
        # page may still use the previous design
        rating = _parse_imdb_rating_previous(input_html)
        votes = _parse_imdb_votes_previous(input_html)

    top250 = _parse_imdb_top250(input_html)
    if top250 is None:
        top250 = _parse_imdb_top250_previous(input_html)

    return votes, rating, top250
|
||||
|
||||
def _parse_imdb_rating_and_votes(input_html):
    """Read rating and vote count from the page's ld+json metadata.

    :return: (rating, votes); (None, None) when absent or unparseable
    """
    ldjson_match = IMDB_LDJSON_REGEX.search(input_html)
    if not ldjson_match:
        return None, None
    try:
        ldjson = json.loads(ldjson_match.group(1).replace('\n', ''))
        # AttributeError covers the case where the document isn't a dict
        aggregate = ldjson.get('aggregateRating', {})
        return aggregate.get('ratingValue'), aggregate.get('ratingCount')
    except (json.decoder.JSONDecodeError, AttributeError):
        return None, None
|
||||
|
||||
def _parse_imdb_top250(input_html):
    """Top-250 rank from the current page layout, or None."""
    found = IMDB_TOP250_REGEX.search(input_html)
    return int(found.group(1)) if found else None
|
||||
|
||||
def _parse_imdb_rating_previous(input_html):
    """Rating from the pre-June-2021 page layout, or None."""
    found = IMDB_RATING_REGEX_PREVIOUS.search(input_html)
    return float(found.group(1)) if found else None
|
||||
|
||||
def _parse_imdb_votes_previous(input_html):
    """Vote count from the pre-June-2021 page layout, or None."""
    found = IMDB_VOTES_REGEX_PREVIOUS.search(input_html)
    return int(found.group(1).replace(',', '')) if found else None
|
||||
|
||||
def _parse_imdb_top250_previous(input_html):
    """Top-250 rank from the pre-June-2021 page layout, or None."""
    found = IMDB_TOP250_REGEX_PREVIOUS.search(input_html)
    return int(found.group(1)) if found else None
|
||||
@@ -0,0 +1,270 @@
|
||||
from datetime import datetime, timedelta
|
||||
from . import tmdbapi
|
||||
|
||||
class TMDBMovieScraper(object):
    """Scrapes movie metadata (info, ratings, cast, artwork) from TMDb."""

    def __init__(self, url_settings, language, certification_country, search_language=""):
        # url_settings: add-on settings object used to cache image base URLs
        self.url_settings = url_settings
        self.language = language
        self.certification_country = certification_country
        # an empty search language falls back to the metadata language
        if(search_language == ""):
            self.search_language = language
        else:
            self.search_language = search_language
        self._urls = None  # lazily loaded, see the `urls` property

    @property
    def urls(self):
        """Image base URLs ('original'/'preview'), loaded on first access."""
        if not self._urls:
            self._urls = _load_base_urls(self.url_settings)
        return self._urls

    def search(self, title, year=None):
        """Search TMDb for *title*; returns a result list or an error dict.

        *title* may also be a literal ID ('tt…', 'tmdb/…', 'imdb/tt…'),
        in which case the movie is looked up directly.
        """

        def is_best(item):
            # exact (lower-cased) title match, plus release-year match if given
            return item['title'].lower() == title and (
                not year or item.get('release_date', '').startswith(year))

        search_media_id = _parse_media_id(title)
        if search_media_id:
            if search_media_id['type'] == 'tmdb':
                result = _get_movie(search_media_id['id'], None, True)
                if 'error' in result:
                    return result
                result = [result]
            else:
                result = tmdbapi.find_movie_by_external_id(search_media_id['id'], language=self.search_language)
                if 'error' in result:
                    return result
                result = result.get('movie_results')
        else:
            response = tmdbapi.search_movie(query=title, year=year, language=self.search_language)
            if 'error' in response:
                return response
            result = response['results']
            # get second page if available and if first page doesn't contain an `is_best` result with popularity > 5
            if response['total_pages'] > 1:
                bests = [item for item in result if is_best(item) and item.get('popularity',0) > 5]
                if not bests:
                    response = tmdbapi.search_movie(query=title, year=year, language=self.language, page=2)
                    if not 'error' in response:
                        result += response['results']
        urls = self.urls

        if result:
            # move all `is_best` results at the beginning of the list, sort them by popularity (if found):
            bests_first = sorted([item for item in result if is_best(item)], key=lambda k: k.get('popularity',0), reverse=True)
            result = bests_first + [item for item in result if item not in bests_first]

            # turn relative artwork paths into absolute preview URLs
            for item in result:
                if item.get('poster_path'):
                    item['poster_path'] = urls['preview'] + item['poster_path']
                if item.get('backdrop_path'):
                    item['backdrop_path'] = urls['preview'] + item['backdrop_path']
        return result

    def get_details(self, uniqueids):
        """Full details for a movie identified by 'tmdb' or 'imdb' unique ID.

        Returns None when the movie cannot be identified, an error dict on
        API failure, or the assembled scraper result dict.
        """
        media_id = uniqueids.get('tmdb')
        if not media_id:
            # no TMDb ID available: try resolving the IMDb ID to one
            imdb_id = uniqueids.get('imdb')
            if not imdb_id:
                return None

            find_results = tmdbapi.find_movie_by_external_id(imdb_id)
            if 'error' in find_results:
                return find_results
            if find_results.get('movie_results'):
                movie = find_results['movie_results'][0]
                media_id = movie['id']
            if not media_id:
                return None

        details = self._gather_details(media_id)
        if not details:
            return None
        if details.get('error'):
            return details
        return self._assemble_details(**details)

    def _gather_details(self, media_id):
        """Fetch localized movie + collection data, with English fallback."""
        movie = _get_movie(media_id, self.language)
        if not movie or movie.get('error'):
            return movie

        # don't specify language to get English text for fallback
        movie_fallback = _get_movie(media_id)
        movie['images'] = movie_fallback['images']

        collection = _get_moviecollection(movie['belongs_to_collection'].get('id'), self.language) if \
            movie['belongs_to_collection'] else None
        collection_fallback = _get_moviecollection(movie['belongs_to_collection'].get('id')) if \
            movie['belongs_to_collection'] else None
        if collection and collection_fallback and 'images' in collection_fallback:
            collection['images'] = collection_fallback['images']

        return {'movie': movie, 'movie_fallback': movie_fallback, 'collection': collection,
            'collection_fallback': collection_fallback}

    def _assemble_details(self, movie, movie_fallback, collection, collection_fallback):
        """Shape raw TMDb payloads into the scraper result dict."""
        info = {
            'title': movie['title'],
            'originaltitle': movie['original_title'],
            'plot': movie.get('overview') or movie_fallback.get('overview'),
            'tagline': movie.get('tagline') or movie_fallback.get('tagline'),
            'studio': _get_names(movie['production_companies']),
            'genre': _get_names(movie['genres']),
            'country': _get_names(movie['production_countries']),
            'credits': _get_cast_members(movie['casts'], 'crew', 'Writing', ['Screenplay', 'Writer', 'Author']),
            'director': _get_cast_members(movie['casts'], 'crew', 'Directing', ['Director']),
            'premiered': movie['release_date'],
            'tag': _get_names(movie['keywords']['keywords'])
        }

        # certification ('mpaa') from the configured country, if available
        if 'countries' in movie['releases']:
            certcountry = self.certification_country.upper()
            for country in movie['releases']['countries']:
                if country['iso_3166_1'] == certcountry and country['certification']:
                    info['mpaa'] = country['certification']
                    break

        trailer = _parse_trailer(movie.get('trailers', {}), movie_fallback.get('trailers', {}))
        if trailer:
            info['trailer'] = trailer
        if collection:
            info['set'] = collection.get('name') or collection_fallback.get('name')
            info['setoverview'] = collection.get('overview') or collection_fallback.get('overview')
        if movie.get('runtime'):
            info['duration'] = movie['runtime'] * 60  # TMDb minutes -> Kodi seconds

        ratings = {'themoviedb': {'rating': float(movie['vote_average']), 'votes': int(movie['vote_count'])}}
        uniqueids = {'tmdb': str(movie['id']), 'imdb': movie['imdb_id']}
        cast = [{
                'name': actor['name'],
                'role': actor['character'],
                'thumbnail': self.urls['original'] + actor['profile_path']
                    if actor['profile_path'] else "",
                'order': actor['order']
            }
            for actor in movie['casts'].get('cast', [])
        ]
        available_art = _parse_artwork(movie, collection, self.urls, self.language)

        # '_info' carries scraper-internal extras (the collection's TMDb ID
        # is needed later for fanart.tv set artwork)
        _info = {'set_tmdbid': movie['belongs_to_collection'].get('id')
            if movie['belongs_to_collection'] else None}

        return {'info': info, 'ratings': ratings, 'uniqueids': uniqueids, 'cast': cast,
            'available_art': available_art, '_info': _info}
|
||||
|
||||
def _parse_media_id(title):
|
||||
if title.startswith('tt') and title[2:].isdigit():
|
||||
return {'type': 'imdb', 'id':title} # IMDB ID works alone because it is clear
|
||||
title = title.lower()
|
||||
if title.startswith('tmdb/') and title[5:].isdigit(): # TMDB ID
|
||||
return {'type': 'tmdb', 'id':title[5:]}
|
||||
elif title.startswith('imdb/tt') and title[7:].isdigit(): # IMDB ID with prefix to match
|
||||
return {'type': 'imdb', 'id':title[5:]}
|
||||
return None
|
||||
|
||||
def _get_movie(mid, language=None, search=False):
    """Fetch a movie from TMDb; the extra data requested depends on context."""
    if search:
        # search probes only need the basic record
        extra = None
    elif language is not None:
        extra = 'trailers,images,releases,casts,keywords'
    else:
        # language-less fallback fetch only needs media
        extra = 'trailers,images'
    return tmdbapi.get_movie(mid, language=language, append_to_response=extra)
|
||||
|
||||
def _get_moviecollection(collection_id, language=None):
    """Fetch collection info (with images) from TMDb; None without an ID."""
    if not collection_id:
        return None
    return tmdbapi.get_collection(
        collection_id, language=language, append_to_response='images')
|
||||
|
||||
def _parse_artwork(movie, collection, urlbases, language):
    """Assemble Kodi art lists for a movie and (optionally) its collection."""
    if language:
        # TMDb image languages carry no regional suffix
        language = language.split('-')[0]

    posters, landscape, logos, fanart = [], [], [], []
    if 'images' in movie:
        images = movie['images']
        posters = _build_image_list_with_fallback(images['posters'], urlbases, language)
        landscape = _build_image_list_with_fallback(images['backdrops'], urlbases, language)
        logos = _build_image_list_with_fallback(images['logos'], urlbases, language)
        fanart = _build_fanart_list(images['backdrops'], urlbases)

    setposters, setlandscape, setfanart = [], [], []
    if collection and 'images' in collection:
        set_images = collection['images']
        setposters = _build_image_list_with_fallback(set_images['posters'], urlbases, language)
        setlandscape = _build_image_list_with_fallback(set_images['backdrops'], urlbases, language)
        setfanart = _build_fanart_list(set_images['backdrops'], urlbases)

    return {'poster': posters, 'landscape': landscape, 'fanart': fanart,
            'set.poster': setposters, 'set.landscape': setlandscape,
            'set.fanart': setfanart, 'clearlogo': logos}
|
||||
|
||||
def _build_image_list_with_fallback(imagelist, urlbases, language, language_fallback='en'):
    """Images in *language* first, then the fallback language, then any."""
    images = _build_image_list(imagelist, urlbases, [language])
    if language != language_fallback:
        images += _build_image_list(imagelist, urlbases, [language_fallback])
    # when nothing matched either language, take everything
    return images or _build_image_list(imagelist, urlbases)
|
||||
|
||||
def _build_fanart_list(imagelist, urlbases):
    """Fanart list: only textless images ('xx' or no language) qualify."""
    textless = ['xx', None]
    return _build_image_list(imagelist, urlbases, textless)
|
||||
|
||||
def _build_image_list(imagelist, urlbases, languages=[]):
|
||||
result = []
|
||||
for img in imagelist:
|
||||
if languages and img['iso_639_1'] not in languages:
|
||||
continue
|
||||
if img['file_path'].endswith('.svg'):
|
||||
continue
|
||||
result.append({
|
||||
'url': urlbases['original'] + img['file_path'],
|
||||
'preview': urlbases['preview'] + img['file_path'],
|
||||
'lang': img['iso_639_1']
|
||||
})
|
||||
return result
|
||||
|
||||
def _get_date_numeric(datetime_):
|
||||
return (datetime_ - datetime(1970, 1, 1)).total_seconds()
|
||||
|
||||
def _load_base_urls(url_settings):
    """Return TMDb image base URLs, refreshing the cached values monthly.

    The URLs are persisted in the add-on settings; when missing or older
    than 30 days they are re-fetched from the TMDb /configuration endpoint.
    """
    urls = {
        'original': url_settings.getSettingString('originalUrl'),
        'preview': url_settings.getSettingString('previewUrl'),
    }
    last_updated = url_settings.getSettingString('lastUpdated')
    stale = (not urls['original'] or not urls['preview'] or not last_updated or
             float(last_updated) < _get_date_numeric(datetime.now() - timedelta(days=30)))
    if stale:
        conf = tmdbapi.get_configuration()
        if conf:
            base = conf['images']['secure_base_url']
            urls['original'] = base + 'original'
            urls['preview'] = base + 'w780'
            url_settings.setSetting('originalUrl', urls['original'])
            url_settings.setSetting('previewUrl', urls['preview'])
            url_settings.setSetting('lastUpdated', str(_get_date_numeric(datetime.now())))
    return urls
|
||||
|
||||
def _parse_trailer(trailers, fallback):
|
||||
if trailers.get('youtube'):
|
||||
return 'plugin://plugin.video.youtube/play/?video_id='+trailers['youtube'][0]['source']
|
||||
if fallback.get('youtube'):
|
||||
return 'plugin://plugin.video.youtube/play/?video_id='+fallback['youtube'][0]['source']
|
||||
return None
|
||||
|
||||
def _get_names(items):
|
||||
return [item['name'] for item in items] if items else []
|
||||
|
||||
def _get_cast_members(casts, casttype, department, jobs):
|
||||
result = []
|
||||
if casttype in casts:
|
||||
for cast in casts[casttype]:
|
||||
if cast['department'] == department and cast['job'] in jobs and cast['name'] not in result:
|
||||
result.append(cast['name'])
|
||||
return result
|
||||
@@ -0,0 +1,144 @@
|
||||
# -*- coding: UTF-8 -*-
|
||||
#
|
||||
# Copyright (C) 2020, Team Kodi
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
# pylint: disable=missing-docstring
|
||||
|
||||
"""Functions to interact with TMDb API."""
|
||||
|
||||
import unicodedata
|
||||
from . import api_utils
|
||||
try:
|
||||
import xbmc
|
||||
except ModuleNotFoundError:
|
||||
# only used for logging HTTP calls, not available nor needed for testing
|
||||
xbmc = None
|
||||
try:
|
||||
from typing import Optional, Text, Dict, List, Any # pylint: disable=unused-import
|
||||
InfoType = Dict[Text, Any] # pylint: disable=invalid-name
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# Static HTTP headers sent with every TMDb request
HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('Accept', 'application/json'),
)

# Base query params: the project's TMDb API key
TMDB_PARAMS = {'api_key': 'f090bb54758cabf231fb605d3e3e0468'}
BASE_URL = 'https://api.themoviedb.org/3/{}'
SEARCH_URL = BASE_URL.format('search/movie')
FIND_URL = BASE_URL.format('find/{}')  # external-ID (IMDb) lookup
MOVIE_URL = BASE_URL.format('movie/{}')
COLLECTION_URL = BASE_URL.format('collection/{}')
CONFIG_URL = BASE_URL.format('configuration')
|
||||
|
||||
def log(message):
    """Debug-log through Kodi when available; silent no-op otherwise."""
    if xbmc is not None:
        xbmc.log(message, xbmc.LOGDEBUG)
|
||||
|
||||
def search_movie(query, year=None, language=None, page=None):
    # type: (Text) -> List[InfoType]
    """
    Search TMDb for movies matching *query*.

    :param query: movie title to search for
    :param year: restrict to a release year (optional)
    :param language: the language filter for TMDb (optional)
    :param page: the results page to return (optional)
    :return: the parsed API response (results or error)
    """
    # normalize so composed/decomposed Unicode titles compare equally
    query = unicodedata.normalize('NFC', query)
    log('using title of %s to find movie' % query)
    params = _set_params(None, language)
    params['query'] = query
    if page is not None:
        params['page'] = page
    if year is not None:
        params['year'] = str(year)
    api_utils.set_headers(dict(HEADERS))
    return api_utils.load_info(SEARCH_URL, params=params)
|
||||
|
||||
|
||||
def find_movie_by_external_id(external_id, language=None):
    # type: (Text) -> List[InfoType]
    """
    Look a movie up on TMDb by its IMDb ID.

    :param external_id: the IMDb ID
    :param language: the language filter for TMDb (optional)
    :return: the parsed API response (movie or error)
    """
    log('using external id of %s to find movie' % external_id)
    params = _set_params(None, language)
    params['external_source'] = 'imdb_id'
    api_utils.set_headers(dict(HEADERS))
    return api_utils.load_info(FIND_URL.format(external_id), params=params)
|
||||
|
||||
|
||||
|
||||
def get_movie(mid, language=None, append_to_response=None):
    # type: (Text) -> List[InfoType]
    """
    Get movie details from TMDb.

    :param mid: TMDb movie ID
    :param language: the language filter for TMDb (optional)
    :param append_to_response: extra data to request in the same call (optional)
    :return: the parsed API response (movie or error)
    """
    log('using movie id of %s to get movie details' % mid)
    api_utils.set_headers(dict(HEADERS))
    query = _set_params(append_to_response, language)
    return api_utils.load_info(MOVIE_URL.format(mid), params=query)
|
||||
|
||||
|
||||
def get_collection(collection_id, language=None, append_to_response=None):
    # type: (Text) -> List[InfoType]
    """
    Get movie-collection information from TMDb.

    :param collection_id: TMDb collection ID
    :param language: the language filter for TMDb (optional)
    :param append_to_response: extra data to request in the same call (optional)
    :return: the parsed API response (collection or error)
    """
    log('using collection id of %s to get collection details' % collection_id)
    api_utils.set_headers(dict(HEADERS))
    query = _set_params(append_to_response, language)
    return api_utils.load_info(COLLECTION_URL.format(collection_id), params=query)
|
||||
|
||||
|
||||
def get_configuration():
    # type: (Text) -> List[InfoType]
    """
    Fetch the TMDb /configuration document (image base URLs etc.).

    :return: configuration details or error
    """
    log('getting configuration details')
    api_utils.set_headers(dict(HEADERS))
    return api_utils.load_info(CONFIG_URL, params=TMDB_PARAMS.copy())
|
||||
|
||||
|
||||
def _set_params(append_to_response, language):
    """Base query params (API key) plus the optional language/append filters."""
    params = dict(TMDB_PARAMS)
    if language is not None:
        params['language'] = language
    if append_to_response is not None:
        params['append_to_response'] = append_to_response
    return params
|
||||
+55
@@ -0,0 +1,55 @@
|
||||
# -*- coding: UTF-8 -*-
|
||||
#
|
||||
# Copyright (C) 2020, Team Kodi
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
# pylint: disable=missing-docstring
|
||||
|
||||
"""Functions to interact with Trakt API."""
|
||||
|
||||
from __future__ import absolute_import, unicode_literals
|
||||
|
||||
from . import api_utils
|
||||
from . import get_imdb_id
|
||||
try:
|
||||
from typing import Optional, Text, Dict, List, Any # pylint: disable=unused-import
|
||||
InfoType = Dict[Text, Any] # pylint: disable=invalid-name
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# Static headers required by the Trakt API (client ID + API version 2)
HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('Accept', 'application/json'),
    ('trakt-api-key', '5f2dc73b6b11c2ac212f5d8b4ec8f3dc4b727bb3f026cd254d89eda997fe64ae'),
    ('trakt-api-version', '2'),
    ('Content-Type', 'application/json'),
)

# Trakt movie endpoint; formatted with an IMDb ID
MOVIE_URL = 'https://api.trakt.tv/movies/{}'
|
||||
|
||||
|
||||
def get_trakt_ratinginfo(uniqueids):
    """Fetch the Trakt community rating for the movie in *uniqueids*.

    :return: {'ratings': {'trakt': {...}}} or {} when unavailable
    """
    imdb_id = get_imdb_id(uniqueids)
    api_utils.set_headers(dict(HEADERS))
    movie_info = api_utils.load_info(
        MOVIE_URL.format(imdb_id), params={'extended': 'full'}, default={})
    result = {}
    if movie_info and 'rating' in movie_info:
        trakt = {}
        if 'votes' in movie_info:
            trakt['votes'] = int(movie_info['votes'])
        trakt['rating'] = float(movie_info['rating'])
        result['ratings'] = {'trakt': trakt}
    return result
|
||||
@@ -0,0 +1,224 @@
|
||||
import json
|
||||
import sys
|
||||
import xbmc
|
||||
import xbmcaddon
|
||||
import xbmcgui
|
||||
import xbmcplugin
|
||||
|
||||
from lib.tmdbscraper.tmdb import TMDBMovieScraper
|
||||
from lib.tmdbscraper.fanarttv import get_details as get_fanarttv_artwork
|
||||
from lib.tmdbscraper.imdbratings import get_details as get_imdb_details
|
||||
from lib.tmdbscraper.traktratings import get_trakt_ratinginfo
|
||||
from scraper_datahelper import combine_scraped_details_info_and_ratings, \
|
||||
combine_scraped_details_available_artwork, find_uniqueids_in_text, get_params
|
||||
from scraper_config import configure_scraped_details, PathSpecificSettings, \
|
||||
configure_tmdb_artwork, is_fanarttv_configured
|
||||
|
||||
# Add-on settings proxy and this add-on's ID (used to tag log lines)
ADDON_SETTINGS = xbmcaddon.Addon()
ID = ADDON_SETTINGS.getAddonInfo('id')
|
||||
|
||||
def log(msg, level=xbmc.LOGDEBUG):
    """Write *msg* to the Kodi log, prefixed with this add-on's id."""
    message = '[{addon}]: {msg}'.format(addon=ID, msg=msg)
    xbmc.log(msg=message, level=level)
|
||||
|
||||
def get_tmdb_scraper(settings):
    """Build a TMDBMovieScraper configured from the given settings object."""
    return TMDBMovieScraper(
        ADDON_SETTINGS,
        settings.getSettingString('language'),
        settings.getSettingString('tmdbcertcountry'),
        settings.getSettingString('searchlanguage'))
|
||||
|
||||
def search_for_movie(title, year, handle, settings):
    """Search TMDB for a movie and publish results as directory items.

    :param title: movie title as entered/scanned; trailing articles are stripped
    :param year: release year as a string, or None when unknown
    :param handle: plugin handle to publish results on
    :param settings: settings shim providing getSettingString/getSettingBool
    """
    log("Find movie with title '{title}' from year '{year}'".format(title=title, year=year), xbmc.LOGINFO)
    title = _strip_trailing_article(title)
    scraper = get_tmdb_scraper(settings)

    search_results = scraper.search(title, year)
    if year is not None:
        # Release years are often off by one between sources; retry around it.
        if not search_results:
            search_results = scraper.search(title,str(int(year)-1))
        if not search_results:
            search_results = scraper.search(title,str(int(year)+1))
    # Last resort: search by title alone, ignoring the year entirely.
    if not search_results:
        search_results = scraper.search(title)
    if not search_results:
        return

    if 'error' in search_results:
        header = "The Movie Database Python error searching with web service TMDB"
        xbmcgui.Dialog().notification(header, search_results['error'], xbmcgui.NOTIFICATION_WARNING)
        log(header + ': ' + search_results['error'], xbmc.LOGWARNING)
        return

    # Each result becomes a folder item whose URL is a JSON lookup string
    # that the 'getdetails' action later parses back into unique ids.
    for movie in search_results:
        listitem = _searchresult_to_listitem(movie)
        uniqueids = {'tmdb': str(movie['id'])}
        xbmcplugin.addDirectoryItem(handle=handle, url=build_lookup_string(uniqueids),
                                    listitem=listitem, isFolder=True)
|
||||
|
||||
_articles = [prefix + article for prefix in (', ', ' ') for article in ("the", "a", "an")]
|
||||
def _strip_trailing_article(title):
|
||||
title = title.lower()
|
||||
for article in _articles:
|
||||
if title.endswith(article):
|
||||
return title[:-len(article)]
|
||||
return title
|
||||
|
||||
def _searchresult_to_listitem(movie):
    """Convert one TMDB search-result dict into an xbmcgui.ListItem.

    Uses the movie's title, optional release year, and poster path.
    """
    movie_label = movie['title']

    # release_date is 'YYYY-MM-DD'; keep only the year for the label
    movie_year = movie['release_date'].split('-')[0] if movie.get('release_date') else None
    if movie_year:
        movie_label += ' ({})'.format(movie_year)

    listitem = xbmcgui.ListItem(movie_label, offscreen=True)

    infotag = listitem.getVideoInfoTag()
    infotag.setTitle(movie['title'])
    if movie_year:
        infotag.setYear(int(movie_year))

    if movie['poster_path']:
        listitem.setArt({'thumb': movie['poster_path']})

    return listitem
|
||||
|
||||
# Default limit of 10 because a big list of artwork can cause trouble in some cases
# (a column can be too large for the MySQL integration),
# and how useful is a big list anyway? Not exactly rhetorical, this is an experiment.
def add_artworks(listitem, artworks, IMAGE_LIMIT):
    """Attach available artwork URLs to *listitem*, capped at IMAGE_LIMIT per type.

    :param artworks: dict mapping art type -> list of image dicts with 'url'
        (fanart entries additionally carry 'preview')
    """
    infotag = listitem.getVideoInfoTag()
    for arttype, artlist in artworks.items():
        if arttype == 'fanart':
            # fanart goes through setAvailableFanart below, not addAvailableArtwork
            continue
        for image in artlist[:IMAGE_LIMIT]:
            infotag.addAvailableArtwork(image['url'], arttype)

    # fanart needs both the full image URL and a preview thumbnail
    fanart_to_set = [{'image': image['url'], 'preview': image['preview']}
                     for image in artworks.get('fanart', ())[:IMAGE_LIMIT]]
    listitem.setAvailableFanart(fanart_to_set)
|
||||
|
||||
def get_details(input_uniqueids, handle, settings, fail_silently=False):
    """Scrape full movie details and resolve them onto the plugin handle.

    Pipeline: TMDB details -> artwork filtering -> optional IMDB/Trakt
    rating merge -> optional fanart.tv artwork merge -> setting-driven
    post-processing -> populate a ListItem and setResolvedUrl.

    :param input_uniqueids: dict of unique ids (e.g. {'tmdb': '...', 'imdb': 'tt...'})
    :param fail_silently: suppress the GUI/log error notification on failure
    :return: True when details were resolved, False otherwise
    """
    if not input_uniqueids:
        return False
    details = get_tmdb_scraper(settings).get_details(input_uniqueids)
    if not details:
        return False
    if 'error' in details:
        if fail_silently:
            return False
        header = "The Movie Database Python error with web service TMDB"
        xbmcgui.Dialog().notification(header, details['error'], xbmcgui.NOTIFICATION_WARNING)
        log(header + ': ' + details['error'], xbmc.LOGWARNING)
        return False

    details = configure_tmdb_artwork(details, settings)

    # Merge IMDB ratings when selected as the rating source or forced on.
    if settings.getSettingString('RatingS') == 'IMDb' or settings.getSettingBool('imdbanyway'):
        imdbinfo = get_imdb_details(details['uniqueids'])
        if 'error' in imdbinfo:
            header = "The Movie Database Python error with website IMDB"
            # IMDB failure is non-fatal: log only, keep the TMDB details
            log(header + ': ' + imdbinfo['error'], xbmc.LOGWARNING)
        else:
            details = combine_scraped_details_info_and_ratings(details, imdbinfo)

    # Merge Trakt ratings when selected as the rating source or forced on.
    if settings.getSettingString('RatingS') == 'Trakt' or settings.getSettingBool('traktanyway'):
        traktinfo = get_trakt_ratinginfo(details['uniqueids'])
        details = combine_scraped_details_info_and_ratings(details, traktinfo)

    if is_fanarttv_configured(settings):
        fanarttv_info = get_fanarttv_artwork(details['uniqueids'],
            settings.getSettingString('fanarttv_clientkey'),
            settings.getSettingString('fanarttv_language'),
            details['_info']['set_tmdbid'])
        details = combine_scraped_details_available_artwork(details,
            fanarttv_info,
            settings.getSettingString('language'),
            settings)

    details = configure_scraped_details(details, settings)

    listitem = xbmcgui.ListItem(details['info']['title'], offscreen=True)
    infotag = listitem.getVideoInfoTag()
    set_info(infotag, details['info'])
    infotag.setCast(build_cast(details['cast']))
    infotag.setUniqueIDs(details['uniqueids'], 'tmdb')
    infotag.setRatings(build_ratings(details['ratings']), find_defaultrating(details['ratings']))
    IMAGE_LIMIT = settings.getSettingInt('maxartwork')
    add_artworks(listitem, details['available_art'], IMAGE_LIMIT)

    xbmcplugin.setResolvedUrl(handle=handle, succeeded=True, listitem=listitem)
    return True
|
||||
|
||||
def set_info(infotag: xbmc.InfoTagVideo, info_dict):
    """Copy scraped info fields onto the ListItem's video info tag.

    The first group of fields is assumed always present in *info_dict*;
    the guarded ones below are optional in the scraper output.
    """
    infotag.setTitle(info_dict['title'])
    infotag.setOriginalTitle(info_dict['originaltitle'])
    infotag.setPlot(info_dict['plot'])
    infotag.setTagLine(info_dict['tagline'])
    infotag.setStudios(info_dict['studio'])
    infotag.setGenres(info_dict['genre'])
    infotag.setCountries(info_dict['country'])
    infotag.setWriters(info_dict['credits'])
    infotag.setDirectors(info_dict['director'])
    infotag.setPremiered(info_dict['premiered'])
    # Optional fields: only set when the scraper supplied them.
    if 'tag' in info_dict:
        infotag.setTags(info_dict['tag'])
    if 'mpaa' in info_dict:
        infotag.setMpaa(info_dict['mpaa'])
    if 'trailer' in info_dict:
        infotag.setTrailer(info_dict['trailer'])
    if 'set' in info_dict:
        # a movie collection ('set') always comes with its overview
        infotag.setSet(info_dict['set'])
        infotag.setSetOverview(info_dict['setoverview'])
    if 'duration' in info_dict:
        infotag.setDuration(info_dict['duration'])
    if 'top250' in info_dict:
        infotag.setTop250(info_dict['top250'])
|
||||
|
||||
def build_cast(cast_list):
    """Convert scraper cast dicts into xbmc.Actor objects, keeping order."""
    actors = []
    for member in cast_list:
        actors.append(xbmc.Actor(member['name'], member['role'],
                                 member['order'], member['thumbnail']))
    return actors
|
||||
|
||||
def build_ratings(rating_dict):
    """Map each rating source to a (rating, votes) tuple; votes default to 0."""
    result = {}
    for source, info in rating_dict.items():
        result[source] = (info['rating'], info.get('votes', 0))
    return result
|
||||
|
||||
def find_defaultrating(rating_dict):
    """Return the first rating source flagged 'default', or None."""
    for source, info in rating_dict.items():
        if info['default']:
            return source
    return None
|
||||
|
||||
def find_uniqueids_in_nfo(nfo, handle):
    """Scan nfo text for tmdb/imdb ids and publish them as a directory item.

    Publishes nothing when no ids are found.
    """
    uniqueids = find_uniqueids_in_text(nfo)
    if uniqueids:
        listitem = xbmcgui.ListItem(offscreen=True)
        # the item's URL is a JSON lookup string consumed by 'getdetails'
        xbmcplugin.addDirectoryItem(
            handle=handle, url=build_lookup_string(uniqueids), listitem=listitem, isFolder=True)
|
||||
|
||||
def build_lookup_string(uniqueids):
    """Serialize unique ids into the plugin URL payload (JSON text)."""
    payload = json.dumps(uniqueids)
    return payload
|
||||
|
||||
def parse_lookup_string(uniqueids):
    """Deserialize a JSON lookup payload; log and return None when unparsable."""
    try:
        parsed = json.loads(uniqueids)
    except ValueError:
        log("Can't parse this lookup string, is it from another add-on?\n" + uniqueids, xbmc.LOGWARNING)
        return None
    return parsed
|
||||
|
||||
def run():
    """Plugin entry point: dispatch on the 'action' querystring parameter.

    Actions: 'find' (title search), 'getdetails' (resolve details from a
    lookup string), 'NfoUrl' (extract ids from nfo text).
    """
    params = get_params(sys.argv[1:])
    enddir = True
    if 'action' in params:
        # Kodi may pass path-specific setting overrides as a JSON blob.
        settings = ADDON_SETTINGS if not params.get('pathSettings') else \
            PathSpecificSettings(json.loads(params['pathSettings']), lambda msg: log(msg, xbmc.LOGWARNING))
        action = params["action"]
        if action == 'find' and 'title' in params:
            search_for_movie(params["title"], params.get("year"), params['handle'], settings)
        elif action == 'getdetails' and ('url' in params or 'uniqueIDs' in params):
            unique_ids = parse_lookup_string(params.get('uniqueIDs') or params.get('url'))
            # get_details already ends the directory via setResolvedUrl on success
            enddir = not get_details(unique_ids, params['handle'], settings, fail_silently='uniqueIDs' in params)
        elif action == 'NfoUrl' and 'nfo' in params:
            find_uniqueids_in_nfo(params["nfo"], params['handle'])
        else:
            log("unhandled action: " + action, xbmc.LOGWARNING)
    else:
        log("No action in 'params' to act on", xbmc.LOGWARNING)
    if enddir:
        xbmcplugin.endOfDirectory(params['handle'])


if __name__ == '__main__':
    run()
|
||||
@@ -0,0 +1,116 @@
|
||||
def configure_scraped_details(details, settings):
    """Apply every setting-driven transformation to the scraped details."""
    transforms = (
        _configure_rating_prefix,
        _configure_keeporiginaltitle,
        _configure_trailer,
        _configure_multiple_studios,
        _configure_default_rating,
        _configure_tags,
    )
    for transform in transforms:
        details = transform(details, settings)
    return details
|
||||
|
||||
def configure_tmdb_artwork(details, settings):
    """Filter the TMDB artwork lists according to the user's art settings.

    Posters and fanart are dropped wholesale when disabled; landscape art is
    either dropped or folded into the fanart lists, depending on settings.
    """
    if 'available_art' not in details:
        return details

    art = details['available_art']
    if not settings.getSettingBool('fetch_posters'):
        art.pop('poster', None)
        art.pop('set.poster', None)

    fanart_enabled = settings.getSettingBool('fanart')
    if not fanart_enabled:
        art.pop('fanart', None)
        art.pop('set.fanart', None)

    if not settings.getSettingBool('landscape'):
        # Landscape images still make decent fanart, so reuse them there
        # (movie and collection lists handled symmetrically).
        for landscape_key, fanart_key in (('landscape', 'fanart'),
                                          ('set.landscape', 'set.fanart')):
            if landscape_key not in art:
                continue
            if fanart_enabled:
                art[fanart_key] = art.get(fanart_key, []) + art[landscape_key]
            del art[landscape_key]

    return details
|
||||
|
||||
def is_fanarttv_configured(settings):
    """Whether fanart.tv artwork lookup is enabled in the settings."""
    enabled = settings.getSettingBool('enable_fanarttv_artwork')
    return enabled
|
||||
|
||||
def _configure_rating_prefix(details, settings):
|
||||
if details['info'].get('mpaa'):
|
||||
details['info']['mpaa'] = settings.getSettingString('certprefix') + details['info']['mpaa']
|
||||
return details
|
||||
|
||||
def _configure_keeporiginaltitle(details, settings):
|
||||
if settings.getSettingBool('keeporiginaltitle'):
|
||||
details['info']['title'] = details['info']['originaltitle']
|
||||
return details
|
||||
|
||||
def _configure_trailer(details, settings):
|
||||
if details['info'].get('trailer') and not settings.getSettingBool('trailer'):
|
||||
del details['info']['trailer']
|
||||
return details
|
||||
|
||||
def _configure_multiple_studios(details, settings):
|
||||
if not settings.getSettingBool('multiple_studios'):
|
||||
details['info']['studio'] = details['info']['studio'][:1]
|
||||
return details
|
||||
|
||||
def _configure_default_rating(details, settings):
|
||||
imdb_default = bool(details['ratings'].get('imdb')) and settings.getSettingString('RatingS') == 'IMDb'
|
||||
trakt_default = bool(details['ratings'].get('trakt')) and settings.getSettingString('RatingS') == 'Trakt'
|
||||
default_rating = 'themoviedb'
|
||||
if imdb_default:
|
||||
default_rating = 'imdb'
|
||||
elif trakt_default:
|
||||
default_rating = 'trakt'
|
||||
if default_rating not in details['ratings']:
|
||||
default_rating = list(details['ratings'].keys())[0] if details['ratings'] else None
|
||||
for rating_type in details['ratings'].keys():
|
||||
details['ratings'][rating_type]['default'] = rating_type == default_rating
|
||||
return details
|
||||
|
||||
def _configure_tags(details, settings):
|
||||
if not settings.getSettingBool('add_tags'):
|
||||
del details['info']['tag']
|
||||
return details
|
||||
|
||||
# pylint: disable=invalid-name
try:
    basestring
except NameError: # py2 / py3
    # Python 3 removed basestring; alias it to str so the isinstance
    # check in PathSpecificSettings.getSettingString works on both.
    basestring = str
|
||||
|
||||
#pylint: disable=redefined-builtin
class PathSpecificSettings(object):
    # read-only shim for typed `xbmcaddon.Addon().getSetting*` methods
    def __init__(self, settings_dict, log_fn):
        self.data = settings_dict
        self.log = log_fn

    def getSettingBool(self, id):
        return self._inner_get_setting(id, bool, False)

    def getSettingInt(self, id):
        return self._inner_get_setting(id, int, 0)

    def getSettingNumber(self, id):
        return self._inner_get_setting(id, float, 0.0)

    def getSettingString(self, id):
        return self._inner_get_setting(id, basestring, '')

    def _inner_get_setting(self, setting_id, setting_type, default):
        # fall back to the typed default (and log) on a missing or
        # wrongly-typed value
        value = self.data.get(setting_id)
        if not isinstance(value, setting_type):
            self._log_bad_value(value, setting_id)
            return default
        return value

    def _log_bad_value(self, value, setting_id):
        if value is None:
            message = "requested setting ({0}) was not found.".format(setting_id)
        else:
            message = 'failed to load value "{0}" for setting {1}'.format(value, setting_id)
        self.log(message)
|
||||
@@ -0,0 +1,60 @@
|
||||
import re
|
||||
try:
|
||||
from urlparse import parse_qsl
|
||||
except ImportError: # py2 / py3
|
||||
from urllib.parse import parse_qsl
|
||||
|
||||
# get addon params from the plugin path querystring
def get_params(argv):
    """Parse plugin argv into a params dict; argv[0] is the plugin handle."""
    params = {'handle': int(argv[0])}
    querystring = argv[1] if len(argv) > 1 else ''
    if querystring:
        params.update(parse_qsl(querystring.lstrip('?')))
    return params
|
||||
|
||||
def combine_scraped_details_info_and_ratings(original_details, additional_details):
    """Merge 'info' and 'ratings' from *additional_details* into the original.

    Existing dicts are updated in place; missing keys are adopted wholesale.
    Returns the (mutated) original details.
    """
    if not additional_details:
        return original_details
    for key in ('info', 'ratings'):
        extra = additional_details.get(key)
        if not extra:
            continue
        if key in original_details:
            original_details[key].update(extra)
        else:
            original_details[key] = extra
    return original_details
|
||||
|
||||
def combine_scraped_details_available_artwork(original_details, additional_details, language, settings):
    """Merge extra available_art (e.g. fanart.tv) into the original details.

    The extra art is sorted so images matching *language* come first and is
    placed before the existing art; unless the user prioritizes the extra
    source, the combined list is re-sorted language-first (stable sort).
    """
    if language:
        # Image languages don't have regional variants
        language = language.split('-')[0]
    if not (additional_details and additional_details.get('available_art')):
        return original_details

    if not original_details.get('available_art'):
        original_details['available_art'] = {}
    existing = original_details['available_art']

    for arttype, artlist in additional_details['available_art'].items():
        language_first = sorted(artlist, key=lambda art: art['lang'] == language, reverse=True)
        combined = language_first + existing.get(arttype, [])
        if not settings.getSettingBool('prioritize_fanarttv_artwork'):
            combined = sorted(combined, key=lambda art: art['lang'] == language, reverse=True)
        existing[arttype] = combined

    return original_details
|
||||
|
||||
def find_uniqueids_in_text(input_text):
    """Extract tmdb and imdb ids from free text (e.g. an nfo file's contents).

    Returns a dict with any of the keys 'tmdb' and 'imdb'; empty when
    neither site's URL pattern is found.
    """
    result = {}
    tmdb_match = re.search(r'(themoviedb.org/movie/)([0-9]+)', input_text)
    if tmdb_match:
        result['tmdb'] = tmdb_match.group(2)
    # modern imdb URL first, then the legacy 'Title?NNNN' form
    imdb_match = re.search(r'imdb....?/title/tt([0-9]+)', input_text)
    if not imdb_match:
        imdb_match = re.search(r'imdb....?/Title\?t{0,2}([0-9]+)', input_text)
    if imdb_match:
        result['imdb'] = 'tt' + imdb_match.group(1)
    return result
|
||||
Reference in New Issue
Block a user