2025-10-25 13:21:06 +02:00
parent eb57506d39
commit 033ffb21f5
8388 changed files with 484789 additions and 16 deletions

View File

@@ -0,0 +1,17 @@
def get_imdb_id(uniqueids):
    imdb_id = uniqueids.get('imdb')
    if not imdb_id or not imdb_id.startswith('tt'):
        return None
    return imdb_id


# keys that may appear in scraper results
_ScraperResults = {
    'info',
    'ratings',
    'uniqueids',
    'cast',
    'available_art',
    'error',
    'warning'  # not handled
}
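A quick illustration of the helper above (illustrative only, not part of the committed file): only IDs carrying the 'tt' prefix are passed through.

# Illustrative use of get_imdb_id() defined above
assert get_imdb_id({'imdb': 'tt0133093', 'tmdb': '603'}) == 'tt0133093'
assert get_imdb_id({'imdb': '0133093'}) is None  # missing 'tt' prefix
assert get_imdb_id({'tmdb': '603'}) is None      # no IMDb entry at all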

View File

@@ -0,0 +1,88 @@
# coding: utf-8
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Functions to interact with various web site APIs."""
from __future__ import absolute_import, unicode_literals

import json

try:
    import xbmc
except ModuleNotFoundError:
    # only used for logging HTTP calls, not available nor needed for testing
    xbmc = None
# from pprint import pformat

try:  # PY2 / PY3
    from urllib2 import Request, urlopen
    from urllib2 import URLError
    from urllib import urlencode
except ImportError:
    from urllib.request import Request, urlopen
    from urllib.error import URLError
    from urllib.parse import urlencode

try:
    from typing import Text, Optional, Union, List, Dict, Any  # pylint: disable=unused-import
    InfoType = Dict[Text, Any]  # pylint: disable=invalid-name
except ImportError:
    pass

HEADERS = {}


def set_headers(headers):
    HEADERS.clear()
    HEADERS.update(headers)


def load_info(url, params=None, default=None, resp_type='json'):
    # type: (Text, Optional[Dict[Text, Union[Text, List[Text]]]], Any, Text) -> Union[dict, list, Text]
    """
    Load info from an external API

    :param url: API endpoint URL
    :param params: URL query params
    :param default: object to return if there is an error
    :param resp_type: 'json' to decode the response as JSON, anything else returns the raw text
    :return: API response or default on error
    """
    theerror = ''
    if params:
        url = url + '?' + urlencode(params)
    if xbmc:
        xbmc.log('Calling URL "{}"'.format(url), xbmc.LOGDEBUG)
        if HEADERS:
            xbmc.log(str(HEADERS), xbmc.LOGDEBUG)
    req = Request(url, headers=HEADERS)
    try:
        response = urlopen(req)
    except URLError as e:
        if hasattr(e, 'reason'):
            theerror = {'error': 'failed to reach the remote site\nReason: {}'.format(e.reason)}
        elif hasattr(e, 'code'):
            theerror = {'error': 'remote site unable to fulfill the request\nError code: {}'.format(e.code)}
        if default is not None:
            return default
        else:
            return theerror
    if resp_type.lower() == 'json':
        resp = json.loads(response.read().decode('utf-8'))
    else:
        resp = response.read().decode('utf-8')
    # xbmc.log('the api response:\n{}'.format(pformat(resp)), xbmc.LOGDEBUG)
    return resp
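A minimal usage sketch of the two helpers above; the endpoint URL here is only an example, not one the add-on calls.

# Sketch only: typical call pattern for set_headers()/load_info() (example endpoint)
set_headers({'User-Agent': 'example-agent', 'Accept': 'application/json'})
data = load_info('https://httpbin.org/get', params={'q': 'matrix'}, default={})
# data is the decoded JSON dict, or {} (the supplied default) if the request fails
text = load_info('https://httpbin.org/robots.txt', resp_type='text', default='')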

View File

@@ -0,0 +1,93 @@
from . import api_utils
try:
    from urllib import quote
except ImportError:  # py2 / py3
    from urllib.parse import quote

API_KEY = '384afe262ee0962545a752ff340e3ce4'
API_URL = 'https://webservice.fanart.tv/v3/movies/{}'

ARTMAP = {
    'movielogo': 'clearlogo',
    'hdmovielogo': 'clearlogo',
    'hdmovieclearart': 'clearart',
    'movieart': 'clearart',
    'moviedisc': 'discart',
    'moviebanner': 'banner',
    'moviethumb': 'landscape',
    'moviebackground': 'fanart',
    'movieposter': 'poster'
}

HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('api-key', API_KEY),
)


def get_details(uniqueids, clientkey, language, set_tmdbid):
    media_id = _get_mediaid(uniqueids)
    if not media_id:
        return {}
    movie_data = _get_data(media_id, clientkey)
    movieset_data = _get_data(set_tmdbid, clientkey) if set_tmdbid else None
    if not movie_data and not movieset_data:
        return {}
    movie_art = {}
    movieset_art = {}
    if movie_data:
        movie_art = _parse_data(movie_data, language)
    if movieset_data:
        movieset_art = _parse_data(movieset_data, language)
        movieset_art = {'set.' + key: value for key, value in movieset_art.items()}
    available_art = movie_art
    available_art.update(movieset_art)
    return {'available_art': available_art}


def _get_mediaid(uniqueids):
    for source in ('tmdb', 'imdb', 'unknown'):
        if source in uniqueids:
            return uniqueids[source]


def _get_data(media_id, clientkey):
    headers = dict(HEADERS)
    if clientkey:
        headers['client-key'] = clientkey
    api_utils.set_headers(headers)
    fanarttv_url = API_URL.format(media_id)
    return api_utils.load_info(fanarttv_url, default={})


def _parse_data(data, language, language_fallback='en'):
    result = {}
    for arttype, artlist in data.items():
        if arttype not in ARTMAP:
            continue
        for image in artlist:
            image_lang = _get_imagelanguage(arttype, image)
            if image_lang and image_lang != language and image_lang != language_fallback:
                continue
            generaltype = ARTMAP[arttype]
            if generaltype == 'poster' and not image_lang:
                generaltype = 'keyart'
            if artlist and generaltype not in result:
                result[generaltype] = []
            url = quote(image['url'], safe="%/:=&?~#+!$,;'@()*[]")
            resultimage = {'url': url, 'preview': url.replace('.fanart.tv/fanart/', '.fanart.tv/preview/'), 'lang': image_lang}
            result[generaltype].append(resultimage)
    return result


def _get_imagelanguage(arttype, image):
    if 'lang' not in image or arttype == 'moviebackground':
        return None
    if arttype in ('movielogo', 'hdmovielogo', 'hdmovieclearart', 'movieart', 'moviebanner',
                   'moviethumb', 'moviedisc'):
        return image['lang'] if image['lang'] not in ('', '00') else 'en'
    # movieposter may or may not carry a title, so its language is only kept when one is explicitly set
    return image['lang'] if image['lang'] not in ('', '00') else None
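A sketch of how the entry point above is called; the IDs and collection id are example values and the call performs real fanart.tv requests.

# Sketch only: expected call shape for get_details() (example values)
art = get_details(
    uniqueids={'tmdb': '603', 'imdb': 'tt0133093'},
    clientkey=None,       # optional personal fanart.tv client key
    language='en',
    set_tmdbid='2344')    # TMDb collection id, or None when the movie is not in a set
# on success: {'available_art': {'poster': [...], 'set.poster': [...], ...}}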

View File

@@ -0,0 +1,111 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# IMDb ratings based on code in metadata.themoviedb.org.python by Team Kodi
# pylint: disable=missing-docstring
import json
import re
from . import api_utils
from . import get_imdb_id

IMDB_RATINGS_URL = 'https://www.imdb.com/title/{}/'
IMDB_LDJSON_REGEX = re.compile(r'<script type="application/ld\+json">(.*?)</script>', re.DOTALL)
IMDB_TOP250_REGEX = re.compile(r'Top rated movie #(\d+)')
# previous IMDB page design before June 2021
IMDB_RATING_REGEX_PREVIOUS = re.compile(r'itemprop="ratingValue".*?>.*?([\d.]+).*?<')
IMDB_VOTES_REGEX_PREVIOUS = re.compile(r'itemprop="ratingCount".*?>.*?([\d,]+).*?<')
IMDB_TOP250_REGEX_PREVIOUS = re.compile(r'Top Rated Movies #(\d+)')

HEADERS = (
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'),
    ('Accept', 'application/json'),
)


def get_details(uniqueids):
    imdb_id = get_imdb_id(uniqueids)
    if not imdb_id:
        return {}
    votes, rating, top250 = _get_ratinginfo(imdb_id)
    return _assemble_imdb_result(votes, rating, top250)


def _get_ratinginfo(imdb_id):
    api_utils.set_headers(dict(HEADERS))
    response = api_utils.load_info(IMDB_RATINGS_URL.format(imdb_id), default='', resp_type='text')
    return _parse_imdb_result(response)


def _assemble_imdb_result(votes, rating, top250):
    result = {}
    if top250:
        result['info'] = {'top250': top250}
    if votes and rating:
        result['ratings'] = {'imdb': {'votes': votes, 'rating': rating}}
    return result


def _parse_imdb_result(input_html):
    rating, votes = _parse_imdb_rating_and_votes(input_html)
    if rating is None or votes is None:
        # try previous parsers
        rating = _parse_imdb_rating_previous(input_html)
        votes = _parse_imdb_votes_previous(input_html)
    top250 = _parse_imdb_top250(input_html)
    if top250 is None:
        top250 = _parse_imdb_top250_previous(input_html)
    return votes, rating, top250


def _parse_imdb_rating_and_votes(input_html):
    match = re.search(IMDB_LDJSON_REGEX, input_html)
    if not match:
        return None, None
    try:
        ldjson = json.loads(match.group(1).replace('\n', ''))
    except json.decoder.JSONDecodeError:
        return None, None
    try:
        aggregateRating = ldjson.get('aggregateRating', {})
        rating_value = aggregateRating.get('ratingValue')
        return rating_value, aggregateRating.get('ratingCount')
    except AttributeError:
        return None, None


def _parse_imdb_top250(input_html):
    match = re.search(IMDB_TOP250_REGEX, input_html)
    if match:
        return int(match.group(1))
    return None


def _parse_imdb_rating_previous(input_html):
    match = re.search(IMDB_RATING_REGEX_PREVIOUS, input_html)
    if match:
        return float(match.group(1))
    return None


def _parse_imdb_votes_previous(input_html):
    match = re.search(IMDB_VOTES_REGEX_PREVIOUS, input_html)
    if match:
        return int(match.group(1).replace(',', ''))
    return None


def _parse_imdb_top250_previous(input_html):
    match = re.search(IMDB_TOP250_REGEX_PREVIOUS, input_html)
    if match:
        return int(match.group(1))
    return None
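A minimal illustration of the ld+json parsing path above, using a fabricated snippet rather than a real IMDb page.

# Illustrative only: the ld+json branch on a fabricated snippet
sample = ('<script type="application/ld+json">'
          '{"aggregateRating": {"ratingValue": 8.7, "ratingCount": 2000000}}'
          '</script>')
rating, votes = _parse_imdb_rating_and_votes(sample)
# rating == 8.7, votes == 2000000; pages without the block return (None, None)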

View File

@@ -0,0 +1,270 @@
from datetime import datetime, timedelta
from . import tmdbapi


class TMDBMovieScraper(object):
    def __init__(self, url_settings, language, certification_country, search_language=""):
        self.url_settings = url_settings
        self.language = language
        self.certification_country = certification_country
        if search_language == "":
            self.search_language = language
        else:
            self.search_language = search_language
        self._urls = None

    @property
    def urls(self):
        if not self._urls:
            self._urls = _load_base_urls(self.url_settings)
        return self._urls

    def search(self, title, year=None):
        def is_best(item):
            return item['title'].lower() == title and (
                not year or item.get('release_date', '').startswith(year))

        search_media_id = _parse_media_id(title)
        if search_media_id:
            if search_media_id['type'] == 'tmdb':
                result = _get_movie(search_media_id['id'], None, True)
                if 'error' in result:
                    return result
                result = [result]
            else:
                result = tmdbapi.find_movie_by_external_id(search_media_id['id'], language=self.search_language)
                if 'error' in result:
                    return result
                result = result.get('movie_results')
        else:
            response = tmdbapi.search_movie(query=title, year=year, language=self.search_language)
            if 'error' in response:
                return response
            result = response['results']
            # fetch the second page if available and the first page has no `is_best` result with popularity > 5
            if response['total_pages'] > 1:
                bests = [item for item in result if is_best(item) and item.get('popularity', 0) > 5]
                if not bests:
                    response = tmdbapi.search_movie(query=title, year=year, language=self.language, page=2)
                    if 'error' not in response:
                        result += response['results']
        urls = self.urls
        if result:
            # move all `is_best` results to the beginning of the list, sorted by popularity (if found)
            bests_first = sorted([item for item in result if is_best(item)], key=lambda k: k.get('popularity', 0), reverse=True)
            result = bests_first + [item for item in result if item not in bests_first]
            for item in result:
                if item.get('poster_path'):
                    item['poster_path'] = urls['preview'] + item['poster_path']
                if item.get('backdrop_path'):
                    item['backdrop_path'] = urls['preview'] + item['backdrop_path']
        return result
    def get_details(self, uniqueids):
        media_id = uniqueids.get('tmdb')
        if not media_id:
            imdb_id = uniqueids.get('imdb')
            if not imdb_id:
                return None
            find_results = tmdbapi.find_movie_by_external_id(imdb_id)
            if 'error' in find_results:
                return find_results
            if find_results.get('movie_results'):
                movie = find_results['movie_results'][0]
                media_id = movie['id']
        if not media_id:
            return None
        details = self._gather_details(media_id)
        if not details:
            return None
        if details.get('error'):
            return details
        return self._assemble_details(**details)

    def _gather_details(self, media_id):
        movie = _get_movie(media_id, self.language)
        if not movie or movie.get('error'):
            return movie
        # don't specify language to get English text for fallback
        movie_fallback = _get_movie(media_id)
        movie['images'] = movie_fallback['images']
        collection = _get_moviecollection(movie['belongs_to_collection'].get('id'), self.language) if \
            movie['belongs_to_collection'] else None
        collection_fallback = _get_moviecollection(movie['belongs_to_collection'].get('id')) if \
            movie['belongs_to_collection'] else None
        if collection and collection_fallback and 'images' in collection_fallback:
            collection['images'] = collection_fallback['images']
        return {'movie': movie, 'movie_fallback': movie_fallback, 'collection': collection,
                'collection_fallback': collection_fallback}

    def _assemble_details(self, movie, movie_fallback, collection, collection_fallback):
        info = {
            'title': movie['title'],
            'originaltitle': movie['original_title'],
            'plot': movie.get('overview') or movie_fallback.get('overview'),
            'tagline': movie.get('tagline') or movie_fallback.get('tagline'),
            'studio': _get_names(movie['production_companies']),
            'genre': _get_names(movie['genres']),
            'country': _get_names(movie['production_countries']),
            'credits': _get_cast_members(movie['casts'], 'crew', 'Writing', ['Screenplay', 'Writer', 'Author']),
            'director': _get_cast_members(movie['casts'], 'crew', 'Directing', ['Director']),
            'premiered': movie['release_date'],
            'tag': _get_names(movie['keywords']['keywords'])
        }
        if 'countries' in movie['releases']:
            certcountry = self.certification_country.upper()
            for country in movie['releases']['countries']:
                if country['iso_3166_1'] == certcountry and country['certification']:
                    info['mpaa'] = country['certification']
                    break
        trailer = _parse_trailer(movie.get('trailers', {}), movie_fallback.get('trailers', {}))
        if trailer:
            info['trailer'] = trailer
        if collection:
            info['set'] = collection.get('name') or collection_fallback.get('name')
            info['setoverview'] = collection.get('overview') or collection_fallback.get('overview')
        if movie.get('runtime'):
            info['duration'] = movie['runtime'] * 60
        ratings = {'themoviedb': {'rating': float(movie['vote_average']), 'votes': int(movie['vote_count'])}}
        uniqueids = {'tmdb': str(movie['id']), 'imdb': movie['imdb_id']}
        cast = [{
            'name': actor['name'],
            'role': actor['character'],
            'thumbnail': self.urls['original'] + actor['profile_path'] if actor['profile_path'] else "",
            'order': actor['order']
        } for actor in movie['casts'].get('cast', [])]
        available_art = _parse_artwork(movie, collection, self.urls, self.language)
        _info = {'set_tmdbid': movie['belongs_to_collection'].get('id')
                 if movie['belongs_to_collection'] else None}
        return {'info': info, 'ratings': ratings, 'uniqueids': uniqueids, 'cast': cast,
                'available_art': available_art, '_info': _info}
def _parse_media_id(title):
    if title.startswith('tt') and title[2:].isdigit():
        return {'type': 'imdb', 'id': title}  # a bare IMDb ID is unambiguous, so no prefix is needed
    title = title.lower()
    if title.startswith('tmdb/') and title[5:].isdigit():  # TMDB ID
        return {'type': 'tmdb', 'id': title[5:]}
    elif title.startswith('imdb/tt') and title[7:].isdigit():  # IMDB ID with prefix to match
        return {'type': 'imdb', 'id': title[5:]}
    return None
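The ID-in-title forms this parser recognises, shown with example values (illustrative only).

# Illustrative only: inputs recognised by _parse_media_id()
assert _parse_media_id('tt0133093') == {'type': 'imdb', 'id': 'tt0133093'}
assert _parse_media_id('tmdb/603') == {'type': 'tmdb', 'id': '603'}
assert _parse_media_id('imdb/tt0133093') == {'type': 'imdb', 'id': 'tt0133093'}
assert _parse_media_id('The Matrix') is None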
def _get_movie(mid, language=None, search=False):
    details = None if search else \
        'trailers,images,releases,casts,keywords' if language is not None else \
        'trailers,images'
    return tmdbapi.get_movie(mid, language=language, append_to_response=details)


def _get_moviecollection(collection_id, language=None):
    if not collection_id:
        return None
    details = 'images'
    return tmdbapi.get_collection(collection_id, language=language, append_to_response=details)


def _parse_artwork(movie, collection, urlbases, language):
    if language:
        # Image languages don't have regional variants
        language = language.split('-')[0]
    posters = []
    landscape = []
    logos = []
    fanart = []
    if 'images' in movie:
        posters = _build_image_list_with_fallback(movie['images']['posters'], urlbases, language)
        landscape = _build_image_list_with_fallback(movie['images']['backdrops'], urlbases, language)
        logos = _build_image_list_with_fallback(movie['images']['logos'], urlbases, language)
        fanart = _build_fanart_list(movie['images']['backdrops'], urlbases)
    setposters = []
    setlandscape = []
    setfanart = []
    if collection and 'images' in collection:
        setposters = _build_image_list_with_fallback(collection['images']['posters'], urlbases, language)
        setlandscape = _build_image_list_with_fallback(collection['images']['backdrops'], urlbases, language)
        setfanart = _build_fanart_list(collection['images']['backdrops'], urlbases)
    return {'poster': posters, 'landscape': landscape, 'fanart': fanart,
            'set.poster': setposters, 'set.landscape': setlandscape, 'set.fanart': setfanart, 'clearlogo': logos}


def _build_image_list_with_fallback(imagelist, urlbases, language, language_fallback='en'):
    images = _build_image_list(imagelist, urlbases, [language])
    # Add backup images
    if language != language_fallback:
        images.extend(_build_image_list(imagelist, urlbases, [language_fallback]))
    # Add any images if nothing set so far
    if not images:
        images = _build_image_list(imagelist, urlbases)
    return images


def _build_fanart_list(imagelist, urlbases):
    return _build_image_list(imagelist, urlbases, ['xx', None])


def _build_image_list(imagelist, urlbases, languages=[]):
    result = []
    for img in imagelist:
        if languages and img['iso_639_1'] not in languages:
            continue
        if img['file_path'].endswith('.svg'):
            continue
        result.append({
            'url': urlbases['original'] + img['file_path'],
            'preview': urlbases['preview'] + img['file_path'],
            'lang': img['iso_639_1']
        })
    return result


def _get_date_numeric(datetime_):
    return (datetime_ - datetime(1970, 1, 1)).total_seconds()


def _load_base_urls(url_settings):
    urls = {}
    urls['original'] = url_settings.getSettingString('originalUrl')
    urls['preview'] = url_settings.getSettingString('previewUrl')
    last_updated = url_settings.getSettingString('lastUpdated')
    if not urls['original'] or not urls['preview'] or not last_updated or \
            float(last_updated) < _get_date_numeric(datetime.now() - timedelta(days=30)):
        conf = tmdbapi.get_configuration()
        if conf:
            urls['original'] = conf['images']['secure_base_url'] + 'original'
            urls['preview'] = conf['images']['secure_base_url'] + 'w780'
            url_settings.setSetting('originalUrl', urls['original'])
            url_settings.setSetting('previewUrl', urls['preview'])
            url_settings.setSetting('lastUpdated', str(_get_date_numeric(datetime.now())))
    return urls


def _parse_trailer(trailers, fallback):
    if trailers.get('youtube'):
        return 'plugin://plugin.video.youtube/play/?video_id=' + trailers['youtube'][0]['source']
    if fallback.get('youtube'):
        return 'plugin://plugin.video.youtube/play/?video_id=' + fallback['youtube'][0]['source']
    return None


def _get_names(items):
    return [item['name'] for item in items] if items else []


def _get_cast_members(casts, casttype, department, jobs):
    result = []
    if casttype in casts:
        for cast in casts[casttype]:
            if cast['department'] == department and cast['job'] in jobs and cast['name'] not in result:
                result.append(cast['name'])
    return result
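A sketch of driving the scraper class directly. The settings stub below is hypothetical; it only mimics the getSettingString/setSetting interface that _load_base_urls() expects, and the calls perform real TMDb requests.

# Sketch only: minimal stand-in for the Kodi settings object (hypothetical stub)
class _StubSettings(object):
    def __init__(self):
        self._data = {}
    def getSettingString(self, key):
        return self._data.get(key, '')
    def setSetting(self, key, value):
        self._data[key] = value

scraper = TMDBMovieScraper(_StubSettings(), language='en-US', certification_country='US')
results = scraper.search('The Matrix', year='1999')   # list of candidates, or an error dict
details = scraper.get_details({'imdb': 'tt0133093'})  # info/ratings/cast/available_art payload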

View File

@@ -0,0 +1,144 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=missing-docstring
"""Functions to interact with TMDb API."""
import unicodedata
from . import api_utils
try:
import xbmc
except ModuleNotFoundError:
# only used for logging HTTP calls, not available nor needed for testing
xbmc = None
try:
from typing import Optional, Text, Dict, List, Any # pylint: disable=unused-import
InfoType = Dict[Text, Any] # pylint: disable=invalid-name
except ImportError:
pass
HEADERS = (
('User-Agent', 'Kodi Movie scraper by Team Kodi'),
('Accept', 'application/json'),
)
TMDB_PARAMS = {'api_key': 'f090bb54758cabf231fb605d3e3e0468'}
BASE_URL = 'https://api.themoviedb.org/3/{}'
SEARCH_URL = BASE_URL.format('search/movie')
FIND_URL = BASE_URL.format('find/{}')
MOVIE_URL = BASE_URL.format('movie/{}')
COLLECTION_URL = BASE_URL.format('collection/{}')
CONFIG_URL = BASE_URL.format('configuration')
def log(message):
if xbmc:
xbmc.log(message, xbmc.LOGDEBUG)
def search_movie(query, year=None, language=None, page=None):
# type: (Text) -> List[InfoType]
"""
Search for a movie
:param title: movie title to search
:param year: the year to search (optional)
:param language: the language filter for TMDb (optional)
:param page: the results page to return (optional)
:return: a list with found movies
"""
query = unicodedata.normalize('NFC', query)
log('using title of %s to find movie' % query)
theurl = SEARCH_URL
params = _set_params(None, language)
params['query'] = query
if page is not None:
params['page'] = page
if year is not None:
params['year'] = str(year)
api_utils.set_headers(dict(HEADERS))
return api_utils.load_info(theurl, params=params)
def find_movie_by_external_id(external_id, language=None):
# type: (Text) -> List[InfoType]
"""
Find movie based on external ID
:param mid: external ID
:param language: the language filter for TMDb (optional)
:return: the movie or error
"""
log('using external id of %s to find movie' % external_id)
theurl = FIND_URL.format(external_id)
params = _set_params(None, language)
params['external_source'] = 'imdb_id'
api_utils.set_headers(dict(HEADERS))
return api_utils.load_info(theurl, params=params)
def get_movie(mid, language=None, append_to_response=None):
# type: (Text) -> List[InfoType]
"""
Get movie details
:param mid: TMDb movie ID
:param language: the language filter for TMDb (optional)
:append_to_response: the additional data to get from TMDb (optional)
:return: the movie or error
"""
log('using movie id of %s to get movie details' % mid)
theurl = MOVIE_URL.format(mid)
api_utils.set_headers(dict(HEADERS))
return api_utils.load_info(theurl, params=_set_params(append_to_response, language))
def get_collection(collection_id, language=None, append_to_response=None):
# type: (Text) -> List[InfoType]
"""
Get movie collection information
:param collection_id: TMDb collection ID
:param language: the language filter for TMDb (optional)
:append_to_response: the additional data to get from TMDb (optional)
:return: the movie or error
"""
log('using collection id of %s to get collection details' % collection_id)
theurl = COLLECTION_URL.format(collection_id)
api_utils.set_headers(dict(HEADERS))
return api_utils.load_info(theurl, params=_set_params(append_to_response, language))
def get_configuration():
# type: (Text) -> List[InfoType]
"""
Get configuration information
:return: configuration details or error
"""
log('getting configuration details')
api_utils.set_headers(dict(HEADERS))
return api_utils.load_info(CONFIG_URL, params=TMDB_PARAMS.copy())
def _set_params(append_to_response, language):
params = TMDB_PARAMS.copy()
if language is not None:
params['language'] = language
if append_to_response is not None:
params['append_to_response'] = append_to_response
return params
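A short sketch of the raw helpers above; the values are examples and each call hits the live TMDb API.

# Sketch only: direct use of the TMDb helpers (example values)
page = search_movie(query='The Matrix', year=1999, language='en-US')
# page is the raw TMDb payload: {'page': 1, 'results': [...], 'total_pages': ..., ...}
movie = get_movie(603, language='en-US', append_to_response='trailers,images,releases,casts,keywords')
conf = get_configuration()  # image base URLs consumed by _load_base_urls()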

View File

@@ -0,0 +1,55 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=missing-docstring
"""Functions to interact with Trakt API."""
from __future__ import absolute_import, unicode_literals

from . import api_utils
from . import get_imdb_id
try:
    from typing import Optional, Text, Dict, List, Any  # pylint: disable=unused-import
    InfoType = Dict[Text, Any]  # pylint: disable=invalid-name
except ImportError:
    pass

HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('Accept', 'application/json'),
    ('trakt-api-key', '5f2dc73b6b11c2ac212f5d8b4ec8f3dc4b727bb3f026cd254d89eda997fe64ae'),
    ('trakt-api-version', '2'),
    ('Content-Type', 'application/json'),
)

MOVIE_URL = 'https://api.trakt.tv/movies/{}'


def get_trakt_ratinginfo(uniqueids):
    imdb_id = get_imdb_id(uniqueids)
    result = {}
    url = MOVIE_URL.format(imdb_id)
    params = {'extended': 'full'}
    api_utils.set_headers(dict(HEADERS))
    movie_info = api_utils.load_info(url, params=params, default={})
    if movie_info:
        if 'votes' in movie_info and 'rating' in movie_info:
            result['ratings'] = {'trakt': {'votes': int(movie_info['votes']), 'rating': float(movie_info['rating'])}}
        elif 'rating' in movie_info:
            result['ratings'] = {'trakt': {'rating': float(movie_info['rating'])}}
    return result
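A sketch of the expected call and return shape; the ID is an example value and the call hits the live Trakt API.

# Sketch only: expected call/return shape (example values)
ratings = get_trakt_ratinginfo({'imdb': 'tt0133093'})
# e.g. {'ratings': {'trakt': {'votes': 12345, 'rating': 8.5}}}, or {} when unavailable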