Created February 11, 2026 08:49
xShip v2026.01.18 - Python 3.12+ fixes + critical Python 3 bug fixes + super_meta fix
import os.path
import importlib
import importlib.util
import pkgutil
from resources.lib import log_utils

try:
    import xbmcaddon
    __addon__ = xbmcaddon.Addon()
except Exception:
    __addon__ = None

debug = True  # if __addon__.getSetting('debug.enabled') == 'true' else False


def sources(specified_folders=None):
    try:
        sourceDict = []
        provider = os.path.dirname(__file__).split(os.sep)[-1]
        sourceFolder = getScraperFolder(provider)
        sourceFolderLocation = os.path.join(os.path.dirname(__file__), sourceFolder)
        sourceSubFolders = [x[1] for x in os.walk(sourceFolderLocation)][0]
        if specified_folders is not None:
            sourceSubFolders = specified_folders
        for i in sourceSubFolders:
            for loader, module_name, is_pkg in pkgutil.walk_packages([os.path.join(sourceFolderLocation, i)]):
                if is_pkg:
                    continue
                if enabledCheck(module_name):
                    try:
                        if hasattr(loader, 'find_module'):
                            # legacy loader API (removed in Python 3.12)
                            module = loader.find_module(module_name).load_module(module_name)
                        else:
                            # Python 3.12+: use the spec-based import machinery
                            spec = loader.find_spec(module_name, None)
                            module = importlib.util.module_from_spec(spec)
                            spec.loader.exec_module(module)
                        sourceDict.append((module_name, module.source()))
                    except Exception as e:
                        if __addon__ is not None:  # __addon__ may be None outside Kodi
                            __addon__.setSetting('provider.' + module_name, 'false')
                        if debug:
                            log_utils.log('Error: Loading module: "%s": %s' % (module_name, e), log_utils.LOGERROR)
        return sourceDict
    except Exception:
        return []


def enabledCheck(module_name):
    if __addon__ is not None:
        if __addon__.getSetting('provider.' + module_name) == 'false' or __addon__.getSetting('provider.' + module_name + '.check') == 'false':
            return False
    return True


def providerSources():
    sourceSubFolders = [x[1] for x in os.walk(os.path.dirname(__file__))][0]
    return getModuleName(sourceSubFolders)


def providerNames():
    providerList = []
    provider = __addon__.getSetting('module.provider')
    sourceFolder = getScraperFolder(provider)
    sourceFolderLocation = os.path.join(os.path.dirname(__file__), sourceFolder)
    sourceSubFolders = [x[1] for x in os.walk(sourceFolderLocation)][0]
    for i in sourceSubFolders:
        for loader, module_name, is_pkg in pkgutil.walk_packages([os.path.join(sourceFolderLocation, i)]):
            if is_pkg:
                continue
            correctName = module_name.split('_')[0]
            providerList.append(correctName)
    return providerList


def getAllHosters():
    def _sources(sourceFolder, appendList):
        sourceFolderLocation = os.path.join(os.path.dirname(__file__), sourceFolder)
        sourceSubFolders = [x[1] for x in os.walk(sourceFolderLocation)][0]
        for i in sourceSubFolders:
            for loader, module_name, is_pkg in pkgutil.walk_packages([os.path.join(sourceFolderLocation, i)]):
                if is_pkg:
                    continue
                try:
                    mn = str(module_name).split('_')[0]
                except Exception:
                    mn = str(module_name)
                appendList.append(mn)

    sourceSubFolders = [x[1] for x in os.walk(os.path.dirname(__file__))][0]
    appendList = []
    for item in sourceSubFolders:
        if item != 'modules':
            _sources(item, appendList)
    return list(set(appendList))


def getScraperFolder(scraper_source):
    sourceSubFolders = [x[1] for x in os.walk(os.path.dirname(__file__))][0]
    return [i for i in sourceSubFolders if scraper_source.lower() in i.lower()][0]


def getModuleName(scraper_folders):
    nameList = []
    for s in scraper_folders:
        try:
            nameList.append(s.split('_')[1].lower().title())
        except Exception:
            pass
    return nameList
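
Since the headline change in this release is the Python 3.12+ loader path, here is a minimal standalone sketch of that branch. It mirrors the find_spec / module_from_spec / exec_module sequence used above; the 'plugins' folder name is a hypothetical stand-in, not part of the addon.

    # Minimal sketch of spec-based dynamic loading (Python 3.12+).
    # find_module()/load_module() were removed; find_spec() replaces them.
    import importlib.util
    import pkgutil

    def load_all(folder='plugins'):
        modules = {}
        for finder, name, is_pkg in pkgutil.walk_packages([folder]):
            if is_pkg:
                continue
            spec = finder.find_spec(name, None)          # replaces finder.find_module(name)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)              # replaces .load_module(name)
            modules[name] = module
        return modules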
# -*- coding: utf-8 -*-
# Python 3
# Always pay attention to the translations in the menu!
# Hoster language selection included.
# Ajax search function included.
# Long-term HTML cache added:
#   showValue: 24 hours
#   showAllSeries: 24 hours
#   showEpisodes: 4 hours
#   SSsearch: 24 hours
# 2022-12-06 Heptamer - search function reworked

import xbmcgui
from resources.lib.handler.ParameterHandler import ParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.tools import logger, cParser, cUtil
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.config import cConfig
from resources.lib.gui.gui import cGui

SITE_IDENTIFIER = 'aniworld'
SITE_NAME = 'AniWorld'
SITE_ICON = 'aniworld.png'

# Global search function is thus deactivated!
if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'false':
    SITE_GLOBAL_SEARCH = False
    logger.info('-> [SitePlugin]: globalSearch for %s is deactivated.' % SITE_NAME)

# Domain lookup
DOMAIN = cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '.domain')  # Domain selectable via the xStream settings
STATUS = cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '_status')  # Status code query for the domain
ACTIVE = cConfig().getSetting('plugin_' + SITE_IDENTIFIER)  # Whether the plugin is enabled or not

URL_MAIN = 'https://' + DOMAIN
# URL_MAIN = 'https://aniworld.to'
URL_SERIES = URL_MAIN + '/animes'
URL_POPULAR = URL_MAIN + '/beliebte-animes'
URL_NEW_EPISODES = URL_MAIN + '/neue-episoden'
URL_LOGIN = URL_MAIN + '/login'
REFERER = 'https://' + DOMAIN


def load():  # Menu structure of the site plugin
    logger.info('Load %s' % SITE_NAME)
    params = ParameterHandler()
    username = cConfig().getSetting('aniworld.user')  # Username
    password = cConfig().getSetting('aniworld.pass')  # Password
    if username == '' or password == '':  # If no username and password were set, close the plugin!
        xbmcgui.Dialog().ok(cConfig().getLocalizedString(30241), cConfig().getLocalizedString(30263))  # Info dialog!
    else:
        params.setParam('sUrl', URL_SERIES)
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30518), SITE_IDENTIFIER, 'showAllSeries'), params)  # All Series
        params.setParam('sUrl', URL_NEW_EPISODES)
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30516), SITE_IDENTIFIER, 'showNewEpisodes'), params)  # New Episodes
        params.setParam('sUrl', URL_POPULAR)
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30519), SITE_IDENTIFIER, 'showEntries'), params)  # Popular Series
        params.setParam('sUrl', URL_MAIN)
        params.setParam('sCont', 'catalogNav')
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30517), SITE_IDENTIFIER, 'showValue'), params)  # From A-Z
        params.setParam('sCont', 'homeContentGenresList')
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30506), SITE_IDENTIFIER, 'showValue'), params)  # Genre
        cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30520), SITE_IDENTIFIER, 'showSearch'), params)  # Search
        cGui().setEndOfDirectory()


def showValue():
    params = ParameterHandler()
    sUrl = params.getValue('sUrl')
    oRequest = cRequestHandler(sUrl)
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 24  # HTML cache time: 1 day
    sHtmlContent = oRequest.request()
    isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, r'<ul[^>]*class="%s"[^>]*>(.*?)<\/ul>' % params.getValue('sCont'))
    if isMatch:
        isMatch, aResult = cParser.parse(sContainer, r'<li>\s*<a[^>]*href="([^"]*)"[^>]*>(.*?)<\/a>\s*<\/li>')
        aResult = sorted(aResult, key=lambda x: x[1].lower())  # Sort alphabetically by name (case-insensitive)
    if not isMatch:
        cGui().showInfo()
        return
    for sUrl, sName in aResult:
        sUrl = sUrl if sUrl.startswith('http') else URL_MAIN + sUrl
        params.setParam('sUrl', sUrl)
        cGui().addFolder(cGuiElement(sName, SITE_IDENTIFIER, 'showEntries'), params)
    cGui().setEndOfDirectory()


def showAllSeries(entryUrl=False, sGui=False, sSearchText=False):
    oGui = sGui if sGui else cGui()
    params = ParameterHandler()
    if not entryUrl: entryUrl = params.getValue('sUrl')
    oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False))
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 24  # HTML cache time: 1 day
    sHtmlContent = oRequest.request()
    pattern = '<a[^>]*href="(\\/anime\\/[^"]*)"[^>]*>(.*?)</a>'
    isMatch, aResult = cParser.parse(sHtmlContent, pattern)
    if not isMatch:
        if not sGui: oGui.showInfo()
        return
    aResult = sorted(aResult, key=lambda x: x[1].lower())  # Sort alphabetically by name (case-insensitive)
    total = len(aResult)
    for sUrl, sName in aResult:
        if sSearchText and not cParser.search(sSearchText, sName):
            continue
        oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons')
        oGuiElement.setMediaType('tvshow')
        params.setParam('sUrl', URL_MAIN + sUrl)
        params.setParam('TVShowTitle', sName)
        oGui.addFolder(oGuiElement, params, True, total)
    if not sGui:
        oGui.setView('tvshows')
        oGui.setEndOfDirectory()


def showNewEpisodes(entryUrl=False, sGui=False):
    oGui = sGui if sGui else cGui()
    params = ParameterHandler()
    if not entryUrl:
        entryUrl = params.getValue('sUrl')
    oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False))
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 4  # HTML cache time: 4 hours
    sHtmlContent = oRequest.request()
    pattern = r'<div[^>]*class="col-md-[^"]*"[^>]*>\s*<a[^>]*href="([^"]*)"[^>]*>\s*<strong>([^<]+)</strong>\s*<span[^>]*>([^<]+)</span>'
    isMatch, aResult = cParser.parse(sHtmlContent, pattern)
    if not isMatch:
        if not sGui: oGui.showInfo()
        return
    aResult = sorted(aResult, key=lambda x: x[1].lower())  # Sort alphabetically by name (case-insensitive)
    total = len(aResult)
    seen_series = set()
    for sUrl, sName, sInfo in aResult:
        if sName in seen_series:
            continue
        seen_series.add(sName)
        sMovieTitle = sName + ' ' + sInfo
        oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons')
        oGuiElement.setMediaType('tvshow')
        oGuiElement.setTitle(sMovieTitle)
        params.setParam('sUrl', URL_MAIN + sUrl)
        params.setParam('TVShowTitle', sMovieTitle)
        oGui.addFolder(oGuiElement, params, True, total)
    if not sGui:
        oGui.setView('tvshows')
        oGui.setEndOfDirectory()


def showEntries(entryUrl=False, sGui=False):
    oGui = sGui if sGui else cGui()
    params = ParameterHandler()
    if not entryUrl:
        entryUrl = params.getValue('sUrl')
    oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False))
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 6  # HTML cache time: 6 hours
    sHtmlContent = oRequest.request()
    # Pattern structure:
    # '<div[^>]*class="col-md-[^"]*"[^>]*>.*?'        # start element
    # '<a[^>]*href="([^"]*)"[^>]*>.*?'                # url
    # '<img[^>]*src="([^"]*)"[^>]*>.*?'               # thumbnail
    # '<h3>(.*?)<span[^>]*class="paragraph-end">.*?'  # title
    # '</div>'                                        # end element
    pattern = '<div[^>]*class="col-md-[^"]*"[^>]*>.*?<a[^>]*href="([^"]*)"[^>]*>.*?<img[^>]*src="([^"]*)"[^>]*>.*?<h3>(.*?)<span[^>]*class="paragraph-end">.*?</div>'
    isMatch, aResult = cParser.parse(sHtmlContent, pattern)
    if not isMatch:
        if not sGui: oGui.showInfo()
        return
    aResult = sorted(aResult, key=lambda x: x[2].lower())  # Sort alphabetically by name (case-insensitive)
    total = len(aResult)
    for sUrl, sThumbnail, sName in aResult:
        if sThumbnail.startswith('/'):
            sThumbnail = URL_MAIN + sThumbnail
        oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons')
        oGuiElement.setThumbnail(sThumbnail)
        oGuiElement.setMediaType('tvshow')
        params.setParam('sUrl', URL_MAIN + sUrl)
        params.setParam('TVShowTitle', sName)
        oGui.addFolder(oGuiElement, params, True, total)
    if not sGui:
        pattern = 'pagination">.*?<a href="([^"]+)">></a>.*?</a></div>'
        isMatchNextPage, sNextUrl = cParser.parseSingleResult(sHtmlContent, pattern)
        if isMatchNextPage:
            params.setParam('sUrl', sNextUrl)
            oGui.addNextPage(SITE_IDENTIFIER, 'showEntries', params)
        oGui.setView('tvshows')
        oGui.setEndOfDirectory()


def showSeasons():
    params = ParameterHandler()
    sUrl = params.getValue('sUrl')
    sTVShowTitle = params.getValue('TVShowTitle')
    oRequest = cRequestHandler(sUrl)
    sHtmlContent = oRequest.request()
    pattern = '<div[^>]*class="hosterSiteDirectNav"[^>]*>.*?<ul>(.*?)</ul>'
    isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern)
    if isMatch:
        pattern = '<a[^>]*href="([^"]*)"[^>]*title="([^"]*)"[^>]*>(.*?)</a>.*?'
        isMatch, aResult = cParser.parse(sContainer, pattern)
    if not isMatch:
        cGui().showInfo()
        return
    isDesc, sDesc = cParser.parseSingleResult(sHtmlContent, '<p[^>]*data-full-description="(.*?)"[^>]*>')
    isThumbnail, sThumbnail = cParser.parseSingleResult(sHtmlContent, '<div[^>]*class="seriesCoverBox"[^>]*>.*?<img[^>]*src="([^"]*)"[^>]*>')
    if isThumbnail:
        if sThumbnail.startswith('/'):
            sThumbnail = URL_MAIN + sThumbnail
    total = len(aResult)
    for sUrl, sName, sNr in aResult:
        isMovie = sUrl.endswith('filme')
        if 'Alle Filme' in sName:
            sName = 'Filme'
        oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showEpisodes')
        oGuiElement.setMediaType('season' if not isMovie else 'movie')
        if isThumbnail:
            oGuiElement.setThumbnail(sThumbnail)
        if isDesc:
            oGuiElement.setDescription(sDesc)
        if not isMovie:
            oGuiElement.setTVShowTitle(sTVShowTitle)
            oGuiElement.setSeason(sNr)
            params.setParam('sSeason', sNr)
        params.setParam('sThumbnail', sThumbnail)
        params.setParam('sUrl', URL_MAIN + sUrl)
        cGui().addFolder(oGuiElement, params, True, total)
    cGui().setView('seasons')
    cGui().setEndOfDirectory()


def showEpisodes():
    params = ParameterHandler()
    sUrl = params.getValue('sUrl')
    sTVShowTitle = params.getValue('TVShowTitle')
    sSeason = params.getValue('sSeason')
    sThumbnail = params.getValue('sThumbnail')
    if not sSeason:
        sSeason = '0'
    isMovieList = sUrl.endswith('filme')
    oRequest = cRequestHandler(sUrl)
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 4  # HTML cache time: 4 hours
    sHtmlContent = oRequest.request()
    pattern = '<table[^>]*class="seasonEpisodesList"[^>]*>(.*?)</table>'
    isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern)
    if isMatch:
        if isMovieList:
            pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+)">\s([^<]+).*?<strong>([^<]+)'
            isMatch, aResult = cParser.parse(sContainer, pattern)
            if not isMatch:
                pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+)">\s([^<]+).*?<span>([^<]+)'
                isMatch, aResult = cParser.parse(sContainer, pattern)
        else:
            pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+).*?(?:<strong>(.*?)</strong>.*?)?(?:<span>(.*?)</span>.*?)?<'
            isMatch, aResult = cParser.parse(sContainer, pattern)
    if not isMatch:
        cGui().showInfo()
        return
    isDesc, sDesc = cParser.parseSingleResult(sHtmlContent, '<p[^>]*data-full-description="(.*?)"[^>]*>')
    total = len(aResult)
    for sID, sUrl2, sNameGer, sNameEng in aResult:
        sName = '%d - ' % int(sID)
        if isMovieList:
            sName += sNameGer + '- ' + sNameEng
        else:
            sName += sNameGer if sNameGer else sNameEng
        oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showHosters')
        oGuiElement.setMediaType('episode' if not isMovieList else 'movie')
        oGuiElement.setThumbnail(sThumbnail)
        if isDesc:
            oGuiElement.setDescription(sDesc)
        if not isMovieList:
            oGuiElement.setSeason(sSeason)
            oGuiElement.setEpisode(int(sID))
            oGuiElement.setTVShowTitle(sTVShowTitle)
        params.setParam('sUrl', URL_MAIN + sUrl2)
        params.setParam('entryUrl', sUrl)
        cGui().addFolder(oGuiElement, params, False, total)
    cGui().setView('episodes' if not isMovieList else 'movies')
    cGui().setEndOfDirectory()


def showHosters():
    hosters = []
    sUrl = ParameterHandler().getValue('sUrl')
    sHtmlContent = cRequestHandler(sUrl, caching=False).request()
    if cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '.domain') == 'www.aniworld.info':
        pattern = r'<li[^>]*episodeLink([^"]+)"\sdata-lang-key="([^"]+).*?data-link-target="([^"]+).*?<h4>([^<]+)<([^>]+)'
        pattern2 = r'itemprop="keywords".content=".*?Season...([^"]+).S.*?'  # HD marker
        # data-lang-key="1" German
        # data-lang-key="2" Japanese with English subtitles
        # data-lang-key="3" Japanese with German subtitles
        isMatch, aResult = cParser.parse(sHtmlContent, pattern)
        aResult2 = cParser.parse(sHtmlContent, pattern2)  # evaluate pattern 2
        if isMatch:
            for sID, sLang, sUrl, sName, sQuality in aResult:
                # isBlockedHoster() returns 2 values:
                # element [0]: True or False
                # element [1]: domain / hoster name - not needed here!
                sUrl = sUrl.replace('/dl/2010', '/redirect/' + sID)
                if cConfig().isBlockedHoster(sName)[0]: continue  # Exclude hosters from settings.xml or disabled resolvers
                sLanguage = cConfig().getSetting('prefLanguage')
                if sLanguage == '1':  # Preferred language German in settings.xml
                    if '2' in sLang:  # data-lang-key="2" Japanese with English subtitles
                        continue
                    elif '3' in sLang:  # data-lang-key="3" Japanese with German subtitles
                        continue
                    elif sLang == '1':  # data-lang-key="1" German
                        sLang = '(DE)'  # Display language: German
                if sLanguage == '2':  # Preferred language English in settings.xml
                    cGui().showLanguage()  # No entry available in the selected language
                    continue
                if sLanguage == '3':  # Preferred language Japanese in settings.xml
                    if '1' in sLang:  # data-lang-key="1" German
                        continue
                    elif sLang == '3':  # data-lang-key="3" Japanese with German subtitles
                        sLang = '(JPN) Sub: (DE)'  # Display: Japanese with German subtitles
                    elif sLang == '2':  # data-lang-key="2" Japanese with English subtitles
                        sLang = '(JPN) Sub: (EN)'  # Display: Japanese with English subtitles
                if sLanguage == '0':  # All languages
                    if sLang == '1':  # data-lang-key="1"
                        sLang = '(DE)'  # Display: German
                    elif sLang == '3':  # data-lang-key="3"
                        sLang = '(JPN) Sub: (DE)'  # Display: Japanese with German subtitles
                    elif sLang == '2':  # data-lang-key="2"
                        sLang = '(JPN) Sub: (EN)'  # Display: Japanese with English subtitles
                if 'HD' in aResult2[1]:  # If aResult2 contains the HD marker, override the quality
                    sQuality = '720'
                else:
                    sQuality = '480'
                # From here on sName is passed along as well, e.g.
                # from the log [serienstream]: ['/redirect/12286260', 'VOE']
                # where sUrl = '/redirect/12286260' and sName = 'VOE'
                # hoster.py 194
                hoster = {'link': [sUrl, sName], 'name': sName, 'displayedName': '%s [I]%s [%sp][/I]' % (sName, sLang, sQuality), 'quality': sQuality, 'languageCode': sLang}  # Language code for the language priority in hoster.py
                hosters.append(hoster)
        if hosters:
            hosters.append('getHosterUrl')
        if not hosters:
            cGui().showLanguage()
        return hosters
    else:
        pattern = r'<li[^>]*data-lang-key="([^"]+).*?data-link-target="([^"]+).*?<h4>([^<]+)<([^>]+)'
        pattern2 = r'itemprop="keywords".content=".*?Season...([^"]+).S.*?'  # HD marker
        # data-lang-key="1" German
        # data-lang-key="2" Japanese with English subtitles
        # data-lang-key="3" Japanese with German subtitles
        isMatch, aResult = cParser.parse(sHtmlContent, pattern)
        aResult2 = cParser.parse(sHtmlContent, pattern2)  # evaluate pattern 2
        if isMatch:
            for sLang, sUrl, sName, sQuality in aResult:
                # isBlockedHoster() returns 2 values:
                # element [0]: True or False
                # element [1]: domain / hoster name - not needed here!
                if cConfig().isBlockedHoster(sName)[0]: continue  # Exclude hosters from settings.xml or disabled resolvers
                sLanguage = cConfig().getSetting('prefLanguage')
                if sLanguage == '1':  # Preferred language German in settings.xml
                    if '2' in sLang:  # data-lang-key="2" Japanese with English subtitles
                        continue
                    elif '3' in sLang:  # data-lang-key="3" Japanese with German subtitles
                        continue
                    elif sLang == '1':  # data-lang-key="1" German
                        sLang = '(DE)'  # Display language: German
                if sLanguage == '2':  # Preferred language English in settings.xml
                    cGui().showLanguage()  # No entry available in the selected language
                    continue
                if sLanguage == '3':  # Preferred language Japanese in settings.xml
                    if '1' in sLang:  # data-lang-key="1" German
                        continue
                    elif sLang == '3':  # data-lang-key="3" Japanese with German subtitles
                        sLang = '(JPN) Sub: (DE)'  # Display: Japanese with German subtitles
                    elif sLang == '2':  # data-lang-key="2" Japanese with English subtitles
                        sLang = '(JPN) Sub: (EN)'  # Display: Japanese with English subtitles
                if sLanguage == '0':  # All languages
                    if sLang == '1':  # data-lang-key="1"
                        sLang = '(DE)'  # Display: German
                    elif sLang == '3':  # data-lang-key="3"
                        sLang = '(JPN) Sub: (DE)'  # Display: Japanese with German subtitles
                    elif sLang == '2':  # data-lang-key="2"
                        sLang = '(JPN) Sub: (EN)'  # Display: Japanese with English subtitles
                if 'HD' in aResult2[1]:  # If aResult2 contains the HD marker, override the quality
                    sQuality = '720'
                else:
                    sQuality = '480'
                # From here on sName is passed along as well, e.g.
                # from the log [serienstream]: ['/redirect/12286260', 'VOE']
                # where sUrl = '/redirect/12286260' and sName = 'VOE'
                # hoster.py 194
                hoster = {'link': [sUrl, sName], 'name': sName, 'displayedName': '%s [I]%s [%sp][/I]' % (sName, sLang, sQuality), 'quality': sQuality, 'languageCode': sLang}  # Language code for the language priority in hoster.py
                hosters.append(hoster)
        if hosters:
            hosters.append('getHosterUrl')
        if not hosters:
            cGui().showLanguage()
        return hosters


def getHosterUrl(hUrl):
    if isinstance(hUrl, str):
        from ast import literal_eval
        hUrl = literal_eval(hUrl)  # parse the stringified [url, hoster] list (safer than eval)
    username = cConfig().getSetting('aniworld.user')
    password = cConfig().getSetting('aniworld.pass')
    Handler = cRequestHandler(URL_LOGIN, caching=False)
    Handler.addHeaderEntry('Upgrade-Insecure-Requests', '1')
    Handler.addHeaderEntry('Referer', ParameterHandler().getValue('entryUrl'))
    Handler.addParameters('email', username)
    Handler.addParameters('password', password)
    Handler.request()
    Request = cRequestHandler(URL_MAIN + hUrl[0], caching=False)
    Request.addHeaderEntry('Referer', ParameterHandler().getValue('entryUrl'))
    Request.addHeaderEntry('Upgrade-Insecure-Requests', '1')
    Request.request()
    sUrl = Request.getRealUrl()
    if 'voe' in hUrl[1].lower():
        isBlocked, sDomain = cConfig().isBlockedHoster(sUrl)  # The function returns 2 values!
        if isBlocked:  # VOE pseudo-domain not known to resolveUrl
            sUrl = sUrl.replace(sDomain, 'voe.sx')
        return [{'streamUrl': sUrl, 'resolved': False}]
    return [{'streamUrl': sUrl, 'resolved': False}]


def showSearch():
    sSearchText = cGui().showKeyBoard(sHeading=cConfig().getLocalizedString(30281))
    if not sSearchText: return
    _search(False, sSearchText)
    cGui().setEndOfDirectory()


def _search(oGui, sSearchText):
    SSsearch(oGui, sSearchText)


def SSsearch(sGui=False, sSearchText=False):
    oGui = sGui if sGui else cGui()
    params = ParameterHandler()
    params.getValue('sSearchText')
    oRequest = cRequestHandler(URL_SERIES, caching=True, ignoreErrors=(sGui is not False))
    oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
    oRequest.addHeaderEntry('Referer', REFERER + '/animes')
    oRequest.addHeaderEntry('Origin', REFERER)
    oRequest.addHeaderEntry('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
    oRequest.addHeaderEntry('Upgrade-Insecure-Requests', '1')
    if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true':
        oRequest.cacheTime = 60 * 60 * 24  # HTML cache time: 1 day
    sHtmlContent = oRequest.request()
    if not sHtmlContent:
        return
    sst = sSearchText.lower()
    pattern = r'<li><a data.+?href="([^"]+)".+?">(.*?)<\/a><\/l'  # link - title
    isMatch, aResult = cParser.parse(sHtmlContent, pattern)
    if not isMatch:
        oGui.showInfo()
        return
    aResult = sorted(aResult, key=lambda x: x[1].lower())  # Sort alphabetically by name (case-insensitive)
    total = len(aResult)
    for link, title in aResult:
        titleLow = title.lower()
        if sst not in titleLow and not cUtil.isSimilarByToken(sst, titleLow):
            continue
        # get thumbnail / description per entry (optional)
        try:
            sThumbnail, sDescription = getMetaInfo(link, title)
            oGuiElement = cGuiElement(title, SITE_IDENTIFIER, 'showSeasons')
            oGuiElement.setThumbnail(URL_MAIN + sThumbnail)
            oGuiElement.setDescription(sDescription)
            oGuiElement.setTVShowTitle(title)
            oGuiElement.setMediaType('tvshow')
            params.setParam('sUrl', URL_MAIN + link)
            params.setParam('sName', title)
            oGui.addFolder(oGuiElement, params, True, total)
        except Exception:
            oGuiElement = cGuiElement(title, SITE_IDENTIFIER, 'showSeasons')
            oGuiElement.setTVShowTitle(title)
            oGuiElement.setMediaType('tvshow')
            params.setParam('sUrl', URL_MAIN + link)
            params.setParam('sName', title)
            oGui.addFolder(oGuiElement, params, True, total)
    if not sGui:
        oGui.setView('tvshows')


def getMetaInfo(link, title):  # Set metadata for search results:
    oGui = cGui()
    oRequest = cRequestHandler(URL_MAIN + link, caching=False)
    oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest')
    oRequest.addHeaderEntry('Referer', REFERER + '/animes')
    oRequest.addHeaderEntry('Origin', REFERER)
    # get the HTML content
    sHtmlContent = oRequest.request()
    if not sHtmlContent:
        return
    pattern = r'seriesCoverBox">.*?<img src="([^"]+)"\sal.+?data-full-description="([^"]+)"'  # img, description
    aResult = cParser.parse(sHtmlContent, pattern)
    if not aResult[0]:
        return
    for sImg, sDescr in aResult[1]:
        return sImg, sDescr
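
The nested if/elif chains in showHosters encode a small decision table: the preferred-language setting crossed with the site's data-lang-key values. Purely as an illustration (not part of the plugin; LANG_LABELS, ALLOWED, and label_for are invented names), the same mapping could be expressed table-driven:

    # Sketch only: the language filtering above as a lookup table.
    LANG_LABELS = {
        '1': '(DE)',              # German
        '2': '(JPN) Sub: (EN)',   # Japanese with English subtitles
        '3': '(JPN) Sub: (DE)',   # Japanese with German subtitles
    }
    # For each prefLanguage setting: which data-lang-key values are allowed.
    ALLOWED = {
        '0': {'1', '2', '3'},  # all languages
        '1': {'1'},            # German only
        '2': set(),            # English dub is never offered on this site
        '3': {'2', '3'},       # Japanese audio only
    }

    def label_for(pref, lang_key):
        """Return the display label, or None when the entry should be skipped."""
        if lang_key not in ALLOWED.get(pref, set()):
            return None
        return LANG_LABELS[lang_key]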
# kasi - source code of class _Storage() from https://github.com/vlmaksime/script.module.simpleplugin
# 2022-10-05
# edit 2023-02-23
"""
SimplePlugin micro-framework for Kodi content plugins

**Author**: Roman Miroshnychenko aka Roman V.M.
**License**: `GPL v.3 <https://www.gnu.org/copyleft/gpl.html>`_
"""
import os, sys
import time
import hashlib
import pickle
from copy import deepcopy
from shutil import copyfile
import xbmc, xbmcaddon, xbmcvfs

if sys.version_info.major == 3:
    from collections.abc import MutableMapping
else:
    from collections import MutableMapping


class _Storage(MutableMapping):
    """
    Storage(storage_dir, filename='storage.pcl')

    Persistent storage for arbitrary data with a dictionary-like interface.
    It is designed as a context manager and is best used
    with a 'with' statement.

    :param storage_dir: directory for storage
    :type storage_dir: str
    :param filename: the name of a storage file (optional)
    :type filename: str

    Usage::

        with Storage('/foo/bar/storage/') as storage:
            storage['key1'] = value1
            value2 = storage['key2']

    .. note:: After exiting the :keyword:`with` block a :class:`Storage` instance
        is invalidated. Storage contents are saved to disk only for
        a new storage or if the contents have been changed.
    """
    def __init__(self, storage_dir, filename='storage.pcl'):
        """
        Class constructor

        :type storage_dir: str
        :type filename: str
        """
        # inserted by kasi
        name, ext = os.path.splitext(filename)
        if not ext:
            ext = '.pcl'
        filename = name + ext
        self._storage = {}
        self._hash = None
        self._filename = os.path.join(storage_dir, filename)
        try:
            with open(self._filename, 'rb') as fo:
                contents = fo.read()
            self._storage = pickle.loads(contents)
            self._hash = hashlib.md5(contents).hexdigest()
        except (IOError, pickle.PickleError, EOFError, AttributeError):
            pass

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.flush()

    def __getitem__(self, key):
        return self._storage[key]

    def __setitem__(self, key, value):
        self._storage[key] = value

    def __delitem__(self, key):
        del self._storage[key]

    def __iter__(self):
        return iter(self._storage)

    def __len__(self):
        return len(self._storage)

    def __str__(self):
        return '<Storage {0}>'.format(self._storage)

    def flush(self):
        """
        Save storage contents to disk

        This method saves new and changed :class:`Storage` contents to disk
        and invalidates the Storage instance. Unchanged Storage is not saved
        but simply invalidated.
        """
        contents = pickle.dumps(self._storage, protocol=2)
        if self._hash is None or hashlib.md5(contents).hexdigest() != self._hash:
            tmp = self._filename + '.tmp'
            start = time.time()
            while os.path.exists(tmp):
                if time.time() - start > 2.0:
                    raise TimeoutError(
                        'Exceeded timeout for saving {0} contents!'.format(self)
                    )
                xbmc.sleep(100)
            try:
                with open(tmp, 'wb') as fo:
                    fo.write(contents)
                copyfile(tmp, self._filename)
            finally:
                os.remove(tmp)
        del self._storage

    def copy(self):
        """
        Make a copy of storage contents

        .. note:: this method performs a *deep* copy operation.

        :return: a copy of storage contents
        :rtype: dict
        """
        return deepcopy(self._storage)


def _py2_decode(s, encoding='utf-8'):
    """
    Decode Python 2 ``str`` to ``unicode``

    In Python 3 the string is not changed.
    """
    if sys.version_info.major == 2 and isinstance(s, bytes):
        s = s.decode(encoding)
    return s


def _get_storage(filename='storage.pcl'):
    """
    Get a persistent :class:`Storage` instance for storing arbitrary values
    between addon calls.

    A :class:`Storage` instance can be used as a context manager.

    Example::

        with plugin.get_storage() as storage:
            storage['param1'] = value1
            value2 = storage['param2']

    .. note:: After exiting the :keyword:`with` block a :class:`Storage`
        instance is invalidated.

    :param filename: the name of a storage file (optional)
    :type filename: str
    :return: Storage object
    :rtype: Storage
    """
    _profile_dir = _py2_decode(xbmcvfs.translatePath(xbmcaddon.Addon().getAddonInfo('profile')))
    return _Storage(_profile_dir, filename)


def save_query(idFile, timeInSeconds, filename=None):
    with _get_storage(filename) as storage:
        if 'queries' not in storage:
            storage['queries'] = []
        entry = {
            'idFile': idFile,
            'time': timeInSeconds
        }
        # drop any existing entry for this file, then put the new one first
        # (rebuilding the list avoids popping while iterating over indices)
        storage['queries'] = [q for q in storage['queries'] if q['idFile'] != idFile]
        storage['queries'].insert(0, entry)


def remove_query(idFile, filename=None):
    with _get_storage(filename) as storage:
        storage['queries'] = [q for q in storage.get('queries', []) if q['idFile'] != idFile]


def get_query(idFile, filename=None):
    with _get_storage(filename) as storage:
        for q in storage.get('queries', []):
            if q['idFile'] == idFile:
                return [idFile, q['time']]
        return []

# def getSearchTerms(filename=None):
#     with _get_storage(filename) as storage:
#         if 'queries' not in storage:
#             storage['queries'] = []
#         return storage['queries']
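
A minimal usage sketch of the query helpers above, assuming a Kodi environment (since _get_storage resolves the addon profile directory via xbmcaddon/xbmcvfs). The file name and id values are arbitrary:

    # Remembering a playback position between addon calls.
    save_query(idFile=4711, timeInSeconds=1325, filename='resume.pcl')

    entry = get_query(4711, filename='resume.pcl')
    if entry:
        idFile, seconds = entry
        print('resume file %s at %s s' % (idFile, seconds))

    remove_query(4711, filename='resume.pcl')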
import re
import unicodedata
from resources.lib.control import HTMLParser, py2_encode, unescape
from resources.lib import log_utils

html = HTMLParser()


def get(title):
    try:
        if title is None:
            return
        try:
            title = py2_encode(title)
        except Exception:
            pass
        try:
            title = unescape(title)
        except Exception:
            pass
        title = title.replace('\\xc3\\x84', 'Ä').replace('\\xc3\\xa4', 'ä')
        title = title.replace('\\xc3\\x96', 'Ö').replace('\\xc3\\xb6', 'ö')
        title = title.replace('\\xc3\\x9c', 'Ü').replace('\\xc3\\xbc', 'ü')
        title = title.replace('\\xc3\\x9f', 'ß')
        title = re.sub(r'&#(\d+);', '', title)
        title = re.sub(r'(&#[0-9]+)([^;^0-9]+)', r'\1;\2', title)
        title = title.replace('&quot;', '"').replace('&amp;', '&')
        # title = re.sub('\n|([[].+?[]])|([(].+?[)])|\s(vs|v[.])\s|(:|;|-|–|"|,|\'|\_|\.|\?)|\~|\s', '', title)
        title = re.sub(r"\n|([\[].+?[\]])|([(].+?[)])|\s(vs|v[.])\s|([:;\-–\",'_.?])|~|\s", '', title)
        return title.lower()
    except Exception as e:
        log_utils.log('Exception Raised: %s' % str(e), log_utils.LOGERROR)


def geturl(title):
    if title is None:
        return
    title = title.lower()
    # title = title.translate(None, ':*?"\'\.<>|&!,')
    try:
        # str.translate() takes one argument (a mapping) in Python 3;
        # the two-argument form was Python 2 only. str.maketrans builds the mapping.
        title = title.translate(str.maketrans('', '', ':*?"\'.<>|&!,'))
    except Exception:
        for c in ':*?"\'.<>|&!,':
            title = title.replace(c, '')
    title = title.replace('/', '-')
    title = title.replace(' ', '-')
    title = title.replace('--', '-')
    return title


def get_url(title):
    if title is None:
        return
    title = title.replace(' ', '%20')
    return title


def get_gan_url(title):
    if title is None:
        return
    title = title.lower()
    title = title.replace('-', '+')
    title = title.replace(' + ', '+-+')
    title = title.replace(' ', '%20')
    return title


def get_simple(title):
    if title is None:
        return
    title = title.lower()
    title = re.sub(r'(\d{4})', '', title)
    title = re.sub(r'&#(\d+);', '', title)
    title = re.sub(r'(&#[0-9]+)([^;^0-9]+)', r'\1;\2', title)
    title = title.replace('&quot;', '"').replace('&amp;', '&')
    # title = re.sub('\n|\(|\)|\[|\]|\{|\}|\s(vs|v[.])\s|(:|;|-|–|"|,|\'|\_|\.|\?)|\~|\s', '', title).lower()
    title = re.sub(r"\n|\(|\)|\[|\]|\{|\}|\s(vs|v[.])\s|([:;\-–\",'_.?])|~|\s", '', title).lower()
    title = re.sub(r'<.*?>', '', title, count=0)
    return title


def getsearch(title):
    if title is None:
        return
    title = title.lower()
    title = re.sub(r'&#(\d+);', '', title)
    title = re.sub(r'(&#[0-9]+)([^;^0-9]+)', r'\1;\2', title)
    title = title.replace('&quot;', '"').replace('&amp;', '&')
    # title = re.sub('\\\|/|-|–|:|;|\*|\?|"|\'|<|>|\|', '', title).lower()
    title = re.sub(r'[\\/\-–:;*?"\'<>|]', '', title).lower()
    return title


def query(title):
    if title is None:
        return
    title = title.replace('\'', '').rsplit(':', 1)[0].rsplit(' -', 1)[0].replace('-', ' ')
    return title


def get_query(title):
    if title is None:
        return
    title = title.replace(' ', '.').replace(':', '').replace('.-.', '.').replace('\'', '')
    return title


# def normalize(title):
#     from sys import version_info
#     try:
#         if version_info[0] > 2: return title
#         else:
#             try: return title.decode('ascii').encode("utf-8")
#             except: return str(''.join(c for c in unicodedata.normalize('NFKD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
#     except:
#         return title
def normalize(title):
    try:
        if isinstance(title, bytes):
            return title.decode('UTF-8')
        return title
    except Exception:
        return title
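
A quick sanity check of the slug helpers. geturl and get_gan_url only use the standard library, so this runs outside Kodi if the functions are copied out of the module (the module-level imports are Kodi-specific); the expected outputs in the comments follow from the replace chains above:

    print(geturl("Law & Order: SVU"))       # law-order-svu
    print(geturl("What's Up, Doc?"))        # whats-up-doc
    print(get_gan_url("Attack on Titan"))   # attack%20on%20titan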
# 2023-05-10
# edit 2025-06-12
import sys, json
from resources.lib import control

params = dict(control.parse_qsl(control.urlsplit(sys.argv[2]).query))
action = params.get('action')
name = params.get('name')
table = params.get('table')
title = params.get('title')
source = params.get('source')

# ------ navigator --------------
if action is None or action == 'root':
    from resources.lib.indexers import navigator
    navigator.navigator().root()
elif action == 'pluginInfo':
    from resources.lib import supportinfo
    supportinfo.pluginInfo()
elif action == 'movieNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().movies()
elif action == 'tvNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().tvshows()
elif action == 'toolNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().tools()
elif action == 'downloadNavigator':
    from resources.lib.indexers import navigator
    navigator.navigator().downloads()
# -------------------------------------------
elif action == 'download':
    image = params.get('image')
    from resources.lib import downloader
    from resources.lib import sources
    try: downloader.download(name, image, sources.sources().sourcesResolve(json.loads(source)[0], True))
    except: pass
elif action == 'playExtern':
    if not control.visible(): control.busy()
    try:
        sysmeta = {}
        for key, value in params.items():
            if key == 'action': continue
            elif key == 'year' or key == 'season' or key == 'episode': value = int(value)
            if value == 0: continue
            sysmeta.update({key: value})
        if int(params.get('season')) == 0:
            mediatype = 'movie'
        else:
            mediatype = 'tvshow'
        sysmeta.update({'mediatype': mediatype})
        # if control.getSetting('hosts.mode') == '2':
        #     sysmeta.update({'select': '2'})
        # else:
        #     sysmeta.update({'select': '1'})
        sysmeta.update({'select': control.getSetting('hosts.mode')})
        sysmeta = json.dumps(sysmeta)
        params.update({'sysmeta': sysmeta})
        from resources.lib import sources
        sources.sources().play(params)
    except:
        pass
elif action == 'playURL':
    try:
        import resolveurl
        import xbmcgui, xbmc
        # url = 'https://streamvid.net/embed-uhgo683xes41'
        # url = 'https://moflix-stream.click/v/gcd0aueegeia'
        url = xbmcgui.Dialog().input("URL Input")
        hmf = resolveurl.HostedMediaFile(url=url, include_disabled=True, include_universal=False)
        try:
            if hmf.valid_url(): url = hmf.resolve()
        except:
            pass
        item = xbmcgui.ListItem('URL-direkt')
        kodiver = int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0])
        if ".m3u8" in url or '.mpd' in url:
            item.setProperty("inputstream", "inputstream.adaptive")
            if '.mpd' in url:
                if kodiver < 21: item.setProperty('inputstream.adaptive.manifest_type', 'mpd')
                item.setMimeType('application/dash+xml')
            else:
                if kodiver < 21: item.setProperty('inputstream.adaptive.manifest_type', 'hls')
                item.setMimeType("application/vnd.apple.mpegurl")
            item.setContentLookup(False)
        if '|' in url:
            stream_url, strhdr = url.split('|')
            item.setProperty('inputstream.adaptive.stream_headers', strhdr)
            if kodiver > 19: item.setProperty('inputstream.adaptive.manifest_headers', strhdr)
            # item.setPath(stream_url)
            url = stream_url
        item.setPath(url)
        xbmc.Player().play(url, item)
    except:
        # no video link found
        control.infoDialog("Keinen Video Link gefunden", sound=True, icon='WARNING', time=1000)
elif action == 'UpdatePlayCount':
    from resources.lib import playcountDB
    playcountDB.UpdatePlaycount(params)
    control.execute('Container.Refresh')
# listings -------------------------------
elif action == 'listings':
    from resources.lib.indexers import listings
    listings.listings().get(params)
elif action == 'movieYears':
    from resources.lib.indexers import listings
    listings.listings().movieYears()
elif action == 'movieGenres':
    from resources.lib.indexers import listings
    listings.listings().movieGenres()
elif action == 'tvGenres':
    from resources.lib.indexers import listings
    listings.listings().tvGenres()
# search ----------------------
elif action == 'searchNew':
    from resources.lib import searchDB
    searchDB.search_new(table)
elif action == 'searchClear':
    from resources.lib import searchDB
    searchDB.remove_all_query(table)
    # if len(searchDB.getSearchTerms()) == 0:
    #     control.execute('Action(ParentDir)')
elif action == 'searchDelTerm':
    from resources.lib import searchDB
    searchDB.remove_query(name, table)
    # if len(searchDB.getSearchTerms()) == 0:
    #     control.execute('Action(ParentDir)')
# person ----------------------
elif action == 'person':
    from resources.lib.indexers import person
    person.person().get(params)
elif action == 'personSearch':
    from resources.lib.indexers import person
    person.person().search()
elif action == 'personCredits':
    from resources.lib.indexers import person
    person.person().getCredits(params)
elif action == 'playfromPerson':
    if not control.visible(): control.busy()
    sysmeta = json.loads(params['sysmeta'])
    if sysmeta['mediatype'] == 'movie':
        from resources.lib.indexers import movies
        sysmeta = movies.movies().super_meta(sysmeta['tmdb_id'])
        sysmeta = json.dumps(sysmeta)
    else:
        from resources.lib.indexers import tvshows
        sysmeta = tvshows.tvshows().super_meta(sysmeta['tmdb_id'])
        sysmeta = control.quote_plus(json.dumps(sysmeta))
    params.update({'sysmeta': sysmeta})
    from resources.lib import sources
    sources.sources().play(params)
# movies ----------------------
elif action == 'movies':
    from resources.lib.indexers import movies
    movies.movies().get(params)
elif action == 'moviesSearch':
    from resources.lib.indexers import movies
    movies.movies().search()
# tvshows ---------------------------------
elif action == 'tvshows':  # 'tvshowPage'
    from resources.lib.indexers import tvshows
    tvshows.tvshows().get(params)
elif action == 'tvshowsSearch':
    from resources.lib.indexers import tvshows
    tvshows.tvshows().search()
# seasons ---------------------------------
elif action == 'seasons':
    from resources.lib.indexers import seasons
    seasons.seasons().get(params)
# episodes ---------------------------------
elif action == 'episodes':
    from resources.lib.indexers import episodes
    episodes.episodes().get(params)
# sources ---------------------------------
elif action == 'play':
    if not control.visible(): control.busy()
    from resources.lib import sources
    sources.sources().play(params)
elif action == 'addItem':
    from resources.lib import sources
    sources.sources().addItem(title)
elif action == 'playItem':
    if not control.visible(): control.busy()
    from resources.lib import sources
    sources.sources().playItem(title, source)
# Settings ------------------------------
elif action == "settings":  # enable / disable all sources
    from resources import settings
    settings.run(params)
elif action == 'addonSettings':
    # query = None
    query = params.get('query')
    control.openSettings(query)
elif action == 'resetSettings':
    status = control.resetSettings()
    if status:
        control.reload_profile()
        control.sleep(500)
        control.execute('RunAddon("%s")' % control.addonId)
elif action == 'resolverSettings':
    import resolveurl as resolver
    resolver.display_settings()

# try:
#     import pydevd
#     if pydevd.connected: pydevd.kill_all_pydev_threads()
# except:
#     pass
# finally:
#     exit()
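
For orientation, this router runs top to bottom once per plugin:// invocation: Kodi passes the call in sys.argv, and the query string carries the action. A standalone sketch of that dispatch input, using only the standard library and a made-up addon id:

    from urllib.parse import urlsplit, parse_qsl

    # Kodi invokes the addon script with [base_url, handle, query_string];
    # 'plugin.video.example' is hypothetical.
    argv = ['plugin://plugin.video.example/', '1', '?action=episodes&title=Example+Show']

    params = dict(parse_qsl(urlsplit(argv[2]).query))
    print(params)  # {'action': 'episodes', 'title': 'Example Show'}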
| """ | |
| Based on Parsedom for XBMC plugins | |
| Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen | |
| This program is free software: you can redistribute it and/or modify | |
| it under the terms of the GNU General Public License as published by | |
| the Free Software Foundation, either version 3 of the License, or | |
| (at your option) any later version. | |
| This program is distributed in the hope that it will be useful, | |
| but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
| GNU General Public License for more details. | |
| You should have received a copy of the GNU General Public License | |
| along with this program. If not, see <http://www.gnu.org/licenses/>. | |
| """ | |
| import re | |
| from six import iteritems, string_types | |
| from collections import namedtuple | |
| import sys | |
| def py2_decode(value): | |
| if sys.version_info.major == 2: | |
| try: | |
| return value.decode('utf-8') | |
| except: | |
| return value | |
| return value | |
| DomMatch = namedtuple('DOMMatch', ['attrs', 'content']) | |
| re_type = type(re.compile('')) | |
| try: | |
| basestring | |
| except NameError: | |
| basestring = str | |
| def __get_dom_content(html, name, match): | |
| if match.endswith('/>'): | |
| return '' | |
| # override tag name with tag from match if possible | |
| tag = re.match(r'<([^\s/>]+)', match) | |
| if tag: | |
| name = tag.group(1) | |
| start_str = '<%s' % name | |
| end_str = "</%s" % name | |
| # start/end tags without matching case cause issues | |
| start = html.find(match) | |
| end = html.find(end_str, start) | |
| pos = html.find(start_str, start + 1) | |
| while pos < end and pos != -1: # Ignore too early </endstr> return | |
| tend = html.find(end_str, end + len(end_str)) | |
| if tend != -1: | |
| end = tend | |
| pos = html.find(start_str, pos + 1) | |
| if start == -1 and end == -1: | |
| result = '' | |
| elif start > -1 and end > -1: | |
| result = html[start + len(match):end] | |
| elif end > -1: | |
| result = html[:end] | |
| elif start > -1: | |
| result = html[start + len(match):] | |
| else: | |
| result = '' | |
| return result | |
| def __get_dom_elements(item, name, attrs): | |
| if not attrs: | |
| pattern = r'(<%s(?:\s[^>]*>|/?>))' % name | |
| this_list = re.findall(pattern, item, re.M | re.S | re.I) | |
| else: | |
| last_list = None | |
| for key, value in iteritems(attrs): | |
| value_is_regex = isinstance(value, re_type) | |
| value_is_str = isinstance(value, basestring) | |
| pattern = r'''(<{tag}[^>]*\s{key}=(?P<delim>['"])(.*?)(?P=delim)[^>]*>)'''.format(tag=name, key=key) | |
| re_list = re.findall(pattern, item, re.M | re.S | re.I) | |
| if value_is_regex: | |
| this_list = [r[0] for r in re_list if re.match(value, r[2])] | |
| else: | |
| temp_value = [value] if value_is_str else value | |
| this_list = [r[0] for r in re_list if set(temp_value) <= set(r[2].split(' '))] | |
| if not this_list: | |
| has_space = (value_is_regex and ' ' in value.pattern) or (value_is_str and ' ' in value) | |
| if not has_space: | |
| pattern = r'''(<{tag}[^>]*\s{key}=((?:[^\s>]|/>)*)[^>]*>)'''.format(tag=name, key=key) | |
| re_list = re.findall(pattern, item, re.M | re.S | re.I) | |
| if value_is_regex: | |
| this_list = [r[0] for r in re_list if re.match(value, r[1])] | |
| else: | |
| this_list = [r[0] for r in re_list if value == r[1]] | |
| if last_list is None: | |
| last_list = this_list | |
| else: | |
| last_list = [item for item in this_list if item in last_list] | |
| this_list = last_list | |
| return this_list | |
| def __get_attribs(element): | |
| attribs = {} | |
| for match in re.finditer( | |
| r'''\s+(?P<key>[^=]+)=\s*(?:(?P<delim>["'])(?P<value1>.*?)(?P=delim)|(?P<value2>[^"'][^>\s]*))''', element): | |
| match = match.groupdict() | |
| value1 = match.get('value1') | |
| value2 = match.get('value2') | |
| value = value1 if value1 is not None else value2 | |
| if value is None: | |
| continue | |
| attribs[match['key'].lower().strip()] = value | |
| return attribs | |
| def parse_dom(html, name='', attrs=None, req=False, exclude_comments=False): | |
| if attrs is None: | |
| attrs = {} | |
| name = name.strip() | |
| if isinstance(html, string_types) or isinstance(html, DomMatch): | |
| html = [html] | |
| elif isinstance(html, str): | |
| try: | |
| html = [py2_decode(html)] # # html.decode("utf-8") | |
| except: | |
| try: | |
| html = [html.decode("utf-8", "replace")] | |
| except: | |
| html = [html] | |
| elif not isinstance(html, list): | |
| return '' | |
| if not name: | |
| return '' | |
| if not isinstance(attrs, dict): | |
| return '' | |
| if req: | |
| if not isinstance(req, list): | |
| req = [req] | |
| req = set([key.lower() for key in req]) | |
| all_results = [] | |
| for item in html: | |
| if isinstance(item, DomMatch): | |
| item = item.content | |
| if exclude_comments: | |
| item = re.sub(re.compile('<!--.*?-->', re.DOTALL), '', item) | |
| results = [] | |
| for element in __get_dom_elements(item, name, attrs): | |
| attribs = __get_attribs(element) | |
| if req and not req <= set(attribs.keys()): | |
| continue | |
| temp = __get_dom_content(item, name, element).strip() | |
| results.append(DomMatch(attribs, temp)) | |
| item = item[item.find(temp, item.find(element)):] | |
| all_results += results | |
| return all_results |
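
A small self-contained call for illustration; the HTML snippet is made up, and parse_dom returns the DOMMatch namedtuples (.attrs, .content) defined above:

    sample = '<div class="item"><a href="/watch/1" title="Pilot">Pilot</a></div>'

    # All <a> tags that carry an href attribute:
    for match in parse_dom(sample, 'a', req=['href']):
        print(match.attrs['href'], '->', match.content)   # /watch/1 -> Pilot

    # Filter by attribute value:
    divs = parse_dom(sample, 'div', attrs={'class': 'item'})
    print(divs[0].content)  # <a href="/watch/1" title="Pilot">Pilot</a>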
| # last edit: | |
| # 2023-03-20 | |
| import re | |
| import json | |
| import os, sys | |
| import inspect | |
| import xbmc, xbmcgui, xbmcvfs | |
| try: | |
| from urlparse import urlparse, parse_qsl | |
| from urllib import quote_plus, unquote_plus | |
| from urllib2 import Request, urlopen | |
| except ImportError: | |
| from urllib.parse import urlparse, quote_plus, parse_qsl, unquote_plus | |
| from urllib.request import Request, urlopen | |
| def download(name, image, url, subfolder=None): # new | |
| if url == None: return | |
| from resources.lib import control | |
| try: headers = dict(parse_qsl(url.rsplit('|', 1)[1])) | |
| except: headers = dict('') | |
| url = url.split('|')[0] | |
| content = re.compile(r'(.+?)\sS(\d*)E\d*$').findall(name) | |
| if int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0]) >= 19: | |
| table = str.maketrans('', '', '\/:*?"<>|') | |
| transname = name.translate(table).strip('.') | |
| else: | |
| transname = name.translate(None, '\/:*?"<>|').strip('.') | |
| transname = transname.replace(' ', '_') # new | |
| levels =['../../../..', '../../..', '../..', '..'] | |
| if len(content) == 0: | |
| dest = control.getSetting('download.movie.path', False) #TODO | |
| dest = control.translatePath(dest) | |
| for level in levels: | |
| try: control.makeFile(os.path.abspath(os.path.join(dest, level))) | |
| except: pass | |
| if not control.makeFile(dest): | |
| xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen') | |
| return | |
| # new | |
| if subfolder != None: | |
| dest = os.path.join(dest, subfolder) | |
| # if subfolder == None: | |
| # dest = os.path.join(dest, transname) | |
| # else: | |
| # dest = os.path.join(dest, subfolder) | |
| if not control.makeFile(dest): | |
| xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen') | |
| return | |
| else: | |
| dest = control.getSetting('download.tv.path', False) #TODO | |
| dest = control.translatePath(dest) | |
| for level in levels: | |
| try: control.makeFile(os.path.abspath(os.path.join(dest, level))) | |
| except: pass | |
| if not control.makeFile(dest): | |
| xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen') | |
| return | |
| if int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0]) >= 19: | |
| table = str.maketrans('', '', '\/:*?"<>|') | |
| transtvshowtitle = content[0][0].translate(table).strip('.') | |
| else: | |
| transtvshowtitle = content[0][0].translate(None, '\/:*?"<>|').strip('.') | |
| dest = os.path.join(dest, transtvshowtitle) | |
| if not control.makeFile(dest): | |
| xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen') | |
| return | |
| dest = os.path.join(dest, 'Season %01d' % int(content[0][1])) | |
| if not control.makeFile(dest): | |
| xbmcgui.Dialog().ok(name, dest + '[CR]ERROR - Server | Verzeichnis[CR]Download fehlgeschlagen') | |
| return | |
| ext = os.path.splitext(urlparse(url).path)[1][1:] | |
| if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4' | |
| dest = os.path.join(dest, transname + '.' + ext) | |
| sysheaders = quote_plus(json.dumps(headers)) | |
| sysurl = quote_plus(url) | |
| systitle = quote_plus(name) | |
| sysimage = quote_plus(image) | |
| sysdest = quote_plus(dest) | |
| script = inspect.getfile(inspect.currentframe()) | |
| cmd = 'RunScript(%s, %s, %s, %s, %s, %s)' % (script, sysurl, sysdest, systitle, sysimage, sysheaders) | |
| xbmc.executebuiltin(cmd) | |
| def getResponse(url, headers, size): | |
| try: | |
| if size > 0: | |
| size = int(size) | |
| headers['Range'] = 'bytes=%d-' % size | |
| req = Request(url, headers=headers) | |
| resp = urlopen(req, timeout=30) | |
| return resp | |
| except: | |
| return None | |
| def done(title, dest, downloaded): | |
| playing = xbmc.Player().isPlaying() | |
| text = xbmcgui.Window(10000).getProperty('GEN-DOWNLOADED') | |
| if len(text) > 0: | |
| text += '[CR]' | |
| if downloaded: | |
| text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR forestgreen]Download erfolgreich[/COLOR]') | |
| else: | |
| text += '%s : %s' % (dest.rsplit(os.sep)[-1], '[COLOR red]Download fehlgeschlagen[/COLOR]') | |
| xbmcgui.Window(10000).setProperty('GEN-DOWNLOADED', text) | |
| if (not downloaded) or (not playing): | |
| xbmcgui.Dialog().ok(title, text) | |
| xbmcgui.Window(10000).clearProperty('GEN-DOWNLOADED') | |
| def doDownload(url, dest, title, image, headers): | |
| headers = json.loads(unquote_plus(headers)) | |
| url = unquote_plus(url) | |
| title = unquote_plus(title) | |
| image = unquote_plus(image) | |
| dest = unquote_plus(dest) | |
| file = dest.rsplit(os.sep, 1)[-1] | |
| resp = getResponse(url, headers, 0) | |
| if not resp: | |
| xbmcgui.Dialog().ok(title, dest + '[CR]Download fehlgeschlagen[CR]Keine Antwort vom Server') | |
| return | |
| try: content = int(resp.headers['Content-Length']) | |
| except: content = 0 | |
| if ".m3u" in url.lower(): | |
| # if url.lower().endswith('.m3u8'): | |
| try: | |
| import m3u8_To_MP4 | |
| if xbmcgui.Dialog().yesno('Download - ' + title, '%s[CR]Weiter mit Download?' % file, 'Weiter', 'Abbrechen') == 1: return | |
| dest = dest.replace('smb:', '') | |
| m3u8_To_MP4.multithread_download(url, file_path=dest, max_retry_times=5, max_num_workers=20) | |
| xbmcgui.Dialog().ok('INFO', '[CR]Download beendet') | |
| except: | |
| xbmcgui.Dialog().ok('Error', '[CR]Download fehlgeschlagen[CR]Problem mit "script.module.download-m3u8"') | |
| finally: | |
| sys.exit() | |
| try: resumable = 'bytes' in resp.headers['Accept-Ranges'].lower() | |
| except: resumable = False | |
| #print "Download Header" | |
| #print resp.headers | |
| #if resumable: print("Download is resumable") | |
| if content < 1: | |
| xbmcgui.Dialog().ok(title, file + ' Unbekannte Dateigröße[CR]Download nicht möglich') | |
| return | |
| size = 1024 * 1024 | |
| mb = content / (1024 * 1024) | |
| if content < size: | |
| size = content | |
| total = 0 | |
| notify = 0 | |
| errors = 0 | |
| count = 0 | |
| resume = 0 | |
| sleep = 0 | |
| if int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0]) >= 19: | |
| if xbmcgui.Dialog().yesno('Download - ' + title, '%s[CR]Dateigröße %dMB[CR]Weiter mit Download?' % (file, mb) , 'Weiter', 'Abbrechen') == 1: return | |
| else: | |
| if xbmcgui.Dialog().yesno('Download - ' + title, file, 'Dateigröße %dMB' % mb, 'Weiter mit Download?', 'Weiter', 'Abbrechen') == 1: return | |
| #print('Download File Size : %dMB %s ' % (mb, dest)) | |
| #f = open(dest, mode='wb') | |
| f = xbmcvfs.File(dest, 'w') | |
| chunks = [] | |
| while True: | |
| downloaded = total | |
| for c in chunks: | |
| downloaded += len(c) | |
| percent = min(100 * downloaded / content, 100) | |
| if percent >= notify: | |
| # xbmc.executebuiltin( "Notification(%s,%s,%i,%s)" % ( str(int(percent))+'%' + ' - ' + title, dest, 5000, image)) | |
| xbmcgui.Dialog().notification(str(int(percent))+'%' + ' - ' + title, dest, image, 5000, False) | |
| #print('Download percent : %s %s %dMB downloaded : %sMB File Size : %sMB' % (str(int(percent))+'%', dest, mb, downloaded / 1000000, content / 1000000)) | |
| notify += 20 | |
| chunk = None | |
| error = False | |
| try: | |
| chunk = resp.read(size) | |
| if not chunk: | |
| if percent < 99: | |
| error = True | |
| else: | |
| while len(chunks) > 0: | |
| c = chunks.pop(0) | |
| f.write(c) | |
| del c | |
| f.close() | |
| #print('%s download complete' % (dest)) | |
| return done(title, dest, True) | |
| except Exception as e: | |
| #print(str(e)) | |
| error = True | |
| sleep = 10 | |
| errno = 0 | |
| if hasattr(e, 'errno'): | |
| errno = e.errno | |
| if errno == 10035: # 'A non-blocking socket operation could not be completed immediately' | |
| pass | |
| if errno == 10054: #'An existing connection was forcibly closed by the remote host' | |
| errors = 10 #force resume | |
| sleep = 30 | |
| if errno == 11001: # 'getaddrinfo failed' | |
| errors = 10 #force resume | |
| sleep = 30 | |
| if chunk: | |
| errors = 0 | |
| chunks.append(chunk) | |
| if len(chunks) > 5: | |
| c = chunks.pop(0) | |
| f.write(c) | |
| total += len(c) | |
| del c | |
| if error: | |
| errors += 1 | |
| count += 1 | |
| #print('%d Error(s) whilst downloading %s' % (count, dest)) | |
| xbmc.sleep(sleep*1000) | |
| if (resumable and errors > 0) or errors >= 10: | |
| if (not resumable and resume >= 50) or resume >= 500: | |
| #Give up! | |
| #print('%s download canceled - too many error whilst downloading' % (dest)) | |
| return done(title, dest, False) | |
| resume += 1 | |
| errors = 0 | |
| if resumable: | |
| chunks = [] | |
| #create new response | |
| #print('Download resumed (%d) %s' % (resume, dest)) | |
| resp = getResponse(url, headers, total) | |
| else: | |
| #use existing response | |
| pass | |
| if __name__ == '__main__': | |
| if 'downloader.py' in sys.argv[0]: | |
| doDownload(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) | |
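For reference, a minimal standalone sketch of the resume technique that getResponse() and doDownload() implement: on a network error, re-request with a Range header starting at the byte offset already written. It assumes a server that honours Range requests; the chunk size and retry count are illustrative placeholders:
| from urllib.request import Request, urlopen | |
| def fetch_resumable(url, dest, chunk=1024 * 1024, max_retries=5): | |
|     written = 0 | |
|     with open(dest, 'wb') as f: | |
|         for _ in range(max_retries): | |
|             headers = {'Range': 'bytes=%d-' % written} if written else {} | |
|             try: | |
|                 resp = urlopen(Request(url, headers=headers), timeout=30) | |
|                 while True: | |
|                     data = resp.read(chunk) | |
|                     if not data: | |
|                         return written  # stream finished cleanly | |
|                     f.write(data) | |
|                     written += len(data) | |
|             except OSError: | |
|                 continue  # network hiccup: retry from the current offset | |
|     return written | |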
| # einschalten | |
| # 2024-09-05 | |
| # edit 2025-06-15 | |
| from resources.lib.utils import isBlockedHoster | |
| import json | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules.tools import cParser | |
| from resources.lib.control import urljoin, getSetting, urlparse | |
| from scrapers.modules import cleantitle, dom_parser | |
| SITE_IDENTIFIER = 'einschalten' | |
| SITE_DOMAIN = 'einschalten.in' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.search_link = self.base_link + '/search?query=%s' | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| if season > 0: return self.sources | |
| try: | |
| t = [cleantitle.get(i) for i in set(titles) if i] | |
| links = [] | |
| for sSearchText in set(titles): | |
| URL_SEARCH = self.search_link % sSearchText | |
| oRequest = cRequestHandler(URL_SEARCH, caching=True) | |
| oRequest.cacheTime = 60 * 60 # * 48 # 48 hours | |
| sHtmlContent = oRequest.request() | |
| # pattern = 'class="group.*?href="([^"]+).*?title="([^"]+).*?alt=.*?(\d+)' | |
| pattern = r'class="group.*?title="([^"]+).*?href="([^"]+).*?span>(\d+)' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: continue | |
| for sName, sUrl, sYear in aResult: | |
| if year == int(sYear): | |
| if cleantitle.get(sName) in set(t) and sUrl not in links: | |
| links.append(sUrl) | |
| break | |
| if len(links) > 0: break | |
| if len(links) == 0: return self.sources | |
| for link in set(links): | |
| sUrl = self.base_link + '/api' + link + '/watch' | |
| sHtmlContent = cRequestHandler(sUrl).request() | |
| if 'streamUrl' not in sHtmlContent: continue | |
| jResult = json.loads(sHtmlContent) | |
| releaseName = jResult['releaseName'] | |
| if '720p' in releaseName: quality = '720p' | |
| elif '1080p' in releaseName: quality = '1080p' | |
| else: quality = 'SD' | |
| streamUrl = jResult['streamUrl'] | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(streamUrl) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| return self.sources | |
| except: | |
| return self.sources | |
| def resolve(self, url): | |
| return url |
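The releaseName handling in run() reduces to a substring-based quality lookup. A standalone sketch of that mapping (the sample release name is made up):
| def quality_from_release(name): | |
|     for tag in ('1080p', '720p'): | |
|         if tag in name: | |
|             return tag | |
|     return 'SD' | |
| assert quality_from_release('Movie.2023.1080p.WEB.x264') == '1080p' | |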
| #2021-07-15 | |
| # edit 2025-08-02 switch from threads to concurrent.futures | |
| import sys, re | |
| import datetime, json, time | |
| from resources.lib import control, playcountDB | |
| from resources.lib.tmdb import cTMDB | |
| from concurrent.futures import ThreadPoolExecutor | |
| from resources.lib.control import getKodiVersion | |
| if int(getKodiVersion()) >= 20: from infotagger.listitem import ListItemInfoTag | |
| _params = dict(control.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict() | |
| class episodes: | |
| def __init__(self): | |
| self.list = [] | |
| self.lang = "de" | |
| self.datetime = (datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(hours=5)) # utcnow() is deprecated since Python 3.12 | |
| self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f') | |
| self.sysmeta = _params['sysmeta'] | |
| self.ePosition = 0 | |
| def get(self, params): | |
| try: | |
| data = json.loads(params['sysmeta']) | |
| self.title = data['title'] | |
| #number_of_episodes = data['number_of_episodes'] | |
| if 'number_of_episodes' not in data or not data['number_of_episodes']: return | |
| #tmdb_id = data['tmdb_id'] | |
| #tvdb_id = data['tvdb_id'] if 'tvdb_id' in data else None | |
| season = data['season'] | |
| episodes = data['episodes'] | |
| playcount = playcountDB.getPlaycount('season', 'title', self.title, season, None) | |
| if playcount is None: | |
| #playcountDB.createEntry('season', self.title, self.title + ' S%02d' % season, None, None, season, number_of_episodes, None) | |
| playcount = 0 | |
| self.sysmeta = re.sub(r'"playcount": \d', '"playcount": %s' % playcount, self.sysmeta) | |
| for i in episodes: | |
| self.list.append(i) | |
| # for i in range(1, number_of_episodes+1): | |
| # self.list.append({'tmdb_id': tmdb_id, 'tvdb_id': tvdb_id, 'season': season, 'episode': i}) | |
| self.worker() | |
| self.Directory(self.list) | |
| return self.list | |
| except: | |
| return | |
| def worker(self): | |
| try: | |
| self.meta = [] | |
| #much faster | |
| with ThreadPoolExecutor() as executor: | |
| executor.map(self.super_meta, self.list) | |
| self.meta = sorted(self.meta, key=lambda k: k['episode']) | |
| self.list = [i for i in self.meta] # in case a filter function is added later | |
| # self.list = [i for i in self.list if not i['plot'].strip() == '' and not i['poster'] == control.addonPoster()] # - Filter | |
| except: | |
| return | |
| def super_meta(self, i): | |
| try: | |
| #meta = cTMDB().get_meta_episode('episode', '', self.list[i]['tmdb_id'] , self.list[i]['season'], self.list[i]['episode'], advanced='true') | |
| meta = cTMDB()._format_episodes(i, self.title) | |
| try: | |
| playcount = playcountDB.getPlaycount('episode', 'title', self.title, meta['season'], meta['episode']) # mediatype, column_names, column_value, season=0, episode=0 | |
| playcount = playcount if playcount else 0 | |
| overlay = 7 if playcount > 0 else 6 | |
| meta.update({'playcount': playcount, 'overlay': overlay}) | |
| except: | |
| pass | |
| self.meta.append(meta) | |
| except: | |
| pass | |
| def Directory(self, items): | |
| # if xbmc.getInfoLabel("Container.Viewmode") != 55: xbmc.executebuiltin( "Container.SetViewMode(%i)" % 55 ) | |
| if items is None or len(items) == 0: | |
| control.idle() | |
| sys.exit() | |
| sysaddon = sys.argv[0] | |
| syshandle = int(sys.argv[1]) | |
| addonPoster, addonBanner = control.addonPoster(), control.addonBanner() | |
| addonFanart, settingFanart = control.addonFanart(), control.getSetting('fanart') | |
| watchedMenu = "In %s [I]Gesehen[/I]" % control.addonName | |
| unwatchedMenu = "In %s [I]Ungesehen[/I]" % control.addonName | |
| pos = 0 | |
| for i in items: | |
| try: | |
| meta = json.loads(self.sysmeta) | |
| meta.pop('episodes', None) | |
| sysmeta = json.loads(self.sysmeta) | |
| sysmeta.pop('episodes', None) | |
| season = i['season'] | |
| episode = i['episode'] | |
| systitle = sysmeta['title'] | |
| sysname = systitle + ' S%02dE%02d' % (season, episode) | |
| sysmeta.update({'episode': episode}) | |
| sysmeta.update({'sysname': sysname}) | |
| _sysmeta = control.quote_plus(json.dumps(sysmeta)) | |
| if 'title' in i and i['title']: label = '%sx%02d %s' % (season, episode, i['title']) | |
| else: label = '%sx%02d Episode %s' % (season, episode, episode) | |
| if datetime.datetime(*(time.strptime(i['premiered'], "%Y-%m-%d")[0:6])) > datetime.datetime.now(): | |
| label = '[COLOR=red][I]{}[/I][/COLOR]'.format(label) # ffcc0000 | |
| poster = i['poster'] if 'poster' in i and 'http' in i['poster'] else sysmeta['poster'] | |
| fanart = sysmeta['fanart'] if 'fanart' in sysmeta else addonFanart | |
| plot = '' | |
| if 'plot' in i and len(i['plot']) > 50: | |
| plot = i['plot'] | |
| sysmeta.update({'plot': plot}) | |
| #plot = i['plot'] if 'plot' in i and len(i['plot']) > 50 else '' #sysmeta['plot'] | |
| plot = '[COLOR blue]%s%sStaffel: %s Episode: %s[/COLOR]%s%s' % (meta['title'], "\n",i['season'], i['episode'], "\n\n", plot) | |
| meta.update({'poster': poster}) | |
| meta.update({'fanart': fanart}) | |
| meta.update({'plot': plot}) | |
| if 'premiered' in i and i['premiered']: meta.update({'premiered': i['premiered']}) | |
| item = control.item(label=label, offscreen=True) | |
| item.setArt({'poster': poster, 'banner': addonBanner}) | |
| if settingFanart == 'true': item.setProperty('Fanart_Image', fanart) | |
| cm = [] | |
| try: | |
| playcount = i['playcount'] if sysmeta['playcount'] == 0 else 1 | |
| if playcount == 1: | |
| cm.append((unwatchedMenu, 'RunPlugin(%s?action=UpdatePlayCount&meta=%s&playCount=0)' % (sysaddon, _sysmeta))) | |
| meta.update({'playcount': 1, 'overlay': 7}) | |
| sysmeta.update({'playcount': 1, 'overlay': 7}) | |
| pos = episode + 1 | |
| if len(items) == episode: pos = episode | |
| else: | |
| cm.append((watchedMenu, 'RunPlugin(%s?action=UpdatePlayCount&meta=%s&playCount=1)' % (sysaddon, _sysmeta))) | |
| meta.update({'playcount': 0, 'overlay': 6}) | |
| sysmeta.update({'playcount': 0, 'overlay': 6}) | |
| except: | |
| pass | |
| cm.append(('Einstellungen', 'RunPlugin(%s?action=addonSettings)' % sysaddon)) | |
| item.addContextMenuItems(cm) | |
| sysmeta = control.quote_plus(json.dumps(sysmeta)) | |
| url = '%s?action=play&sysmeta=%s' % (sysaddon, sysmeta) | |
| aActors = [] | |
| if 'cast' in meta and meta['cast']: aActors = meta['cast'] | |
| # # # remove unsupported InfoLabels | |
| meta.pop('cast', None) | |
| meta.pop('fanart', None) | |
| meta.pop('poster', None) | |
| meta.pop('imdb_id', None) | |
| meta.pop('tvdb_id', None) | |
| meta.pop('tmdb_id', None) | |
| meta.pop('number_of_seasons', None) | |
| meta.pop('number_of_episodes', None) | |
| meta.pop('originallanguage', None) | |
| meta.pop('sysname', None) | |
| meta.pop('systitle', None) | |
| meta.pop('year', None) | |
| meta.pop('aliases', None) | |
| meta.pop('backdrop_url', None) | |
| meta.pop('cover_url', None) | |
| # fake video/audio info | |
| # video_streaminfo = {'codec': 'h264', "width": 1920, "height": 1080} | |
| # audio_streaminfo = {'codec': 'dts', 'channels': 6, 'language': 'de'} | |
| video_streaminfo = {} | |
| audio_streaminfo = {} | |
| if int(getKodiVersion()) <= 19: | |
| if aActors: item.setCast(aActors) | |
| item.setInfo(type='Video', infoLabels=meta) | |
| item.addStreamInfo('video', video_streaminfo) | |
| item.addStreamInfo('audio', audio_streaminfo) | |
| else: | |
| info_tag = ListItemInfoTag(item, 'video') | |
| info_tag.set_info(meta) | |
| stream_details = { | |
| 'video': [video_streaminfo], | |
| 'audio': [audio_streaminfo]} | |
| info_tag.set_stream_details(stream_details) | |
| info_tag.set_cast(aActors) | |
| control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False) | |
| except: | |
| pass | |
| control.content(syshandle, 'movies') # 'episodes' causes very high CPU load / use 'movies' | |
| if control.skin == 'skin.estuary': | |
| control.execute('Container.SetViewMode(%s)' % str(55)) | |
| control.plugincategory(syshandle, control.addonVersion) | |
| control.endofdirectory(syshandle, cacheToDisc=True) | |
| control.sleep(200) | |
| # moves the selection to the last episode marked as watched | |
| if control.getSetting('status.position') == 'true': | |
| from resources.lib.utils import setPosition | |
| setPosition(pos, __name__) |
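The worker()/super_meta() pair above fans the per-episode metadata lookups out over a thread pool and sorts afterwards. A standalone sketch of the same pattern; fetch_meta is a stand-in for cTMDB()._format_episodes(). Note that executor.map() already returns results in input order, so collecting its return value avoids appending to a shared list from worker threads:
| from concurrent.futures import ThreadPoolExecutor | |
| def fetch_meta(ep): | |
|     return {'episode': ep, 'title': 'Episode %d' % ep}  # placeholder lookup | |
| episodes = [3, 1, 2] | |
| with ThreadPoolExecutor() as executor: | |
|     meta = list(executor.map(fetch_meta, episodes)) | |
| meta.sort(key=lambda m: m['episode']) | |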
| #2024-06-02 | |
| # edit 2024-12-14 | |
| from resources.lib.utils import isBlockedHoster | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import cleantitle, dom_parser | |
| from scrapers.modules.tools import cParser | |
| from resources.lib.control import getSetting, quote_plus | |
| SITE_IDENTIFIER = 'filmpro' | |
| SITE_DOMAIN = 'www.filmpalast.pro' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.search_link = self.base_link + '/?story=%s&do=search&subaction=search&titleonly=3' | |
| # 'tt14088510' | |
| #self.search_link = 'https://meinecloud.click/ddl/%s' | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| if season == 0: | |
| query = 'https://meinecloud.click/ddl/%s' % imdb | |
| oRequest = cRequestHandler(query) | |
| oRequest.cacheTime = 60 * 60 * 24 * 2 | |
| sHtmlContent = oRequest.request() | |
| pattern = "window.open.*?'([^']+).*?mark>([^<]+)" | |
| isMatch, aResults = cParser.parse(sHtmlContent, pattern) | |
| if isMatch: | |
| for link, quality in aResults: | |
| isBlocked, hoster, sUrl, prioHoster = isBlockedHoster(link) | |
| if isBlocked: continue | |
| if sUrl: sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': sUrl, 'direct': True, 'prioHoster': prioHoster}) | |
| return sources | |
| else: | |
| try: | |
| url = '' | |
| t = [cleantitle.get(i) for i in titles if i] | |
| for title in titles: | |
| try: | |
| query = self.search_link % quote_plus(title) | |
| oRequest = cRequestHandler(query) | |
| oRequest.cacheTime = 60 * 60 * 24 * 2 | |
| sHtmlContent = oRequest.request() | |
| r = dom_parser.parse_dom(sHtmlContent, 'ul', attrs={'id': 'dle-content'}) | |
| if r: | |
| r = dom_parser.parse_dom(r, 'li') #, req='href') | |
| if len(r) == 0: continue | |
| for i in r: | |
| pattern = r'href="([^"]+).*?Title">([^<]+).*?Year">(\d+).*?Qlty">([^<]+)' | |
| sUrl, sTitle, sYear, sQuality = cParser.parse(i.content, pattern)[1][0] | |
| sTitle = sTitle.split(' - Der')[0] | |
| if sYear == str(year) and cleantitle.get(sTitle) in t: | |
| url = sUrl | |
| if url: break | |
| if url: | |
| break | |
| except: | |
| pass | |
| if url == '': return sources | |
| oRequest = cRequestHandler(url) | |
| oRequest.cacheTime = 60 * 60 * 24 | |
| sHtmlContent = oRequest.request() | |
| pattern = 'data-num="%sx%s".*?"mirrors">(.*?)</div' % (season, episode) | |
| isMatch, dataLinks = cParser.parse(sHtmlContent, pattern) | |
| pattern = '(http[^"]+)' | |
| aResults = cParser.parse(dataLinks[0], pattern)[1] | |
| for link in aResults: | |
| isBlocked, sDomain, sUrl, prioHoster = isBlockedHoster(link) | |
| if isBlocked: continue | |
| if sUrl: sources.append({'source': sDomain, 'quality': 'HD', 'language': 'de', 'url': sUrl, 'direct': True, 'prioHoster': prioHoster}) | |
| return sources | |
| except: | |
| return sources | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return | |
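The two-stage episode parse in run() first cuts the mirror block for one SxE entry out of the page, then pulls the raw http links from it. A standalone sketch with a made-up HTML snippet:
| import re | |
| html = '<li data-num="1x2"><div class="mirrors"><a href="https://host.tld/x"></a></div></li>' | |
| block = re.search(r'data-num="%sx%s".*?"mirrors">(.*?)</div' % (1, 2), html).group(1) | |
| links = re.findall('(http[^"]+)', block) | |
| assert links == ['https://host.tld/x'] | |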
| # edit 2025-02-13 | |
| from resources.lib.utils import isBlockedHoster | |
| from scrapers.modules.tools import cParser | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import cleantitle, dom_parser | |
| from resources.lib.control import getSetting, quote | |
| SITE_IDENTIFIER = 'hdfilme' | |
| SITE_DOMAIN = 'hdfilme.legal' # https://www.hdfilme.zip/ www.hdfilme.today hdfilme.date www.hdfilme.today | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.search_link = self.base_link + '/index.php?do=search&do=search&subaction=search&search_start=1&full_search=0&result_from=1&titleonly=3&story=%s' | |
| self.checkHoster = True | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| try: | |
| t = set([cleantitle.get(i) for i in set(titles) if i]) | |
| links = [] | |
| for sSearchText in titles: | |
| try: | |
| oRequest = cRequestHandler(self.search_link % quote(sSearchText)) | |
| oRequest.cacheTime = 60 * 60 * 24 * 1 | |
| # oRequest = cRequestHandler(self.search_link, caching=True) | |
| # oRequest.addParameters('story', sSearchText) | |
| # oRequest.addParameters('do', 'search') | |
| # oRequest.addParameters('subaction', 'search') | |
| sHtmlContent = oRequest.request() | |
| #pattern = 'class="col-md-li">.*?href="([^"]+).*?title="([^"]+).*?h2>.*?(\d+)' | |
| pattern = '<div class="box-product(.*?)<h3.*?href="([^"]+).*?">([^<]+).*?(.*?)</li>' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: | |
| continue | |
| for sDummy, sUrl, sName, sYear in aResult: | |
| sName = sName.strip() | |
| if season == 0: | |
| if cleantitle.get(sName) in t and int(sYear) == year: | |
| links.append(sUrl) | |
| else: | |
| if cleantitle.get(sName.split('-')[0].strip()) in t and str(season) in sName.split('-')[1]: | |
| links.append(sUrl) | |
| break | |
| if len(links) > 0: break | |
| except: | |
| continue | |
| if len(links) == 0: return sources | |
| for url in links: # showHosters | |
| oRequest = cRequestHandler(url) | |
| oRequest.cacheTime = 60 * 60 * 24 * 1 | |
| sHtmlContent = oRequest.request() | |
| quality = '720p' | |
| if season > 0: | |
| pattern = r'Episoden\s%s<.*?</ul>' % episode | |
| isMatch, sHtmlContent = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if not isMatch: return sources | |
| isMatch, aResult = cParser().parse(sHtmlContent, 'link="([^"]+).*?>([^<]+)') | |
| if isMatch: | |
| for sUrl, sName in aResult: | |
| if 'youtube' in sUrl or 'dropload' in sUrl: continue | |
| elif 'Player' in sName: continue | |
| elif sUrl.startswith('/'): sUrl = 'https:' + sUrl | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': 'HD', 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| return self.sources | |
| except: | |
| return sources | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return |
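All of these scrapers compare titles through cleantitle.get(), i.e. against a normalised form with case and punctuation stripped. A standalone sketch of that matching idea; clean() is a simplified stand-in, not the real cleantitle implementation:
| import re | |
| def clean(title): | |
|     return re.sub(r'[\W_]+', '', title.lower()) | |
| wanted = {clean(t) for t in ['The Movie', 'Der Film']} | |
| assert clean('the movie!') in wanted | |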
| # edit 2025-06-14 | |
| from resources.lib.utils import isBlockedHoster | |
| import re, random, base64, ast, binascii, json, requests, string, hashlib, pyaes | |
| from resources.lib.control import quote_plus, unquote_plus, infoDialog, urlparse, getSetting | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import dom_parser, cleantitle | |
| from scrapers.modules.tools import cParser, cUtil | |
| from resources.lib import log_utils | |
| SITE_IDENTIFIER = 'kinoger' | |
| SITE_DOMAIN = 'kinoger.com' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.search = self.base_link + '/index.php?do=search&subaction=search&search_start=1&full_search=0&result_from=1&titleonly=3&story=%s' | |
| self.sources = [] | |
| #self.search = self.base_link + '?do=search&subaction=search&titleonly=3&story=%s&x=5&y=11&submit=submit' | |
| # http://kinoger.to/index.php?do=search&subaction=search&search_start=1&full_search=0&result_from=1&titleonly=3&story=Captain%20Marvel | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| items = [] | |
| url = '' | |
| try: | |
| t = [cleantitle.get(i) for i in titles if i] | |
| years = [str(year), str(year + 1)] if season == 0 else [''] | |
| for title in titles: | |
| try: | |
| sUrl = self.search % title | |
| oRequest = cRequestHandler(sUrl) | |
| oRequest.removeBreakLines(False) | |
| oRequest.removeNewLines(False) | |
| oRequest.cacheTime = 60 * 60 * 12 | |
| sHtmlContent = oRequest.request() | |
| search_results = dom_parser.parse_dom(sHtmlContent, 'div', attrs={'class': 'title'}) | |
| search_results = dom_parser.parse_dom(search_results, 'a') | |
| search_results = [(i.attrs['href'], i.content) for i in search_results] | |
| search_results = [(i[0], re.findall(r'(.*?)\((\d+)', i[1])[0]) for i in search_results] | |
| if season > 0: | |
| for x in range(0, len(search_results)): | |
| title = cleantitle.get(search_results[x][1][0]) | |
| if 'staffel' in title and any(k in title for k in t): | |
| url = search_results[x][0] | |
| else: | |
| for x in range(0, len(search_results)): | |
| title = cleantitle.get(search_results[x][1][0]) | |
| if any(k in title for k in t) and search_results[x][1][1] in years: | |
| url = search_results[x][0] | |
| break | |
| if url != '': break | |
| except: | |
| pass | |
| if url == '': return sources | |
| oRequest = cRequestHandler(url) | |
| oRequest.cacheTime = 60 * 60 * 12 | |
| sHtmlContent = oRequest.request() | |
| quali = re.findall('title="Stream.(.+?)"', sHtmlContent) | |
| links = re.findall(r'.show.+?,(\[\[.+?\]\])', sHtmlContent) | |
| if len(links) == 0: return sources | |
| if season > 0 and episode > 0: | |
| season = season - 1 | |
| episode = episode - 1 | |
| for i in range(0, len(links)): | |
| if 'playerx' in links[i]: continue # reason unknown - temporarily off | |
| elif 'kinoger.ru' in links[i]: continue | |
| elif 'wolfstream.tv' in links[i]: continue # Offline | |
| direct = True | |
| pw = ast.literal_eval(links[i]) | |
| url = (pw[season][episode]).strip() | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(url, isResolve=False) | |
| if isBlocked: direct = False | |
| quality = quali[i] | |
| if quality == '': quality = 'SD' | |
| if quality == 'HD': quality = '720p' | |
| if quality == 'HD+': quality = '1080p' | |
| items.append({'source': hoster, 'quality': quality, 'url': url, 'direct': direct, 'prioHoster': prioHoster}) | |
| headers = '&Accept-Language=de%2Cen-US%3Bq%3D0.7%2Cen%3Bq%3D0.3&Accept=%2A%2F%2A&User-Agent=Mozilla%2F5.0+%28Windows+NT+10.0%3B+Win64%3B+x64%3B+rv%3A99.0%29+Gecko%2F20100101+Firefox%2F99.0' | |
| for item in items: | |
| try: | |
| if item['source'] == 'kinoger.ru': #continue | |
| def content_decryptor(html_content, passphrase): | |
| match = re.compile(r'''JScripts = '(.+?)';''', re.DOTALL).search(html_content) | |
| if match: | |
| # Parse the JSON string | |
| json_obj = json.loads(match.group(1)) | |
| # Extract the salt, iv, and ciphertext from the JSON object | |
| salt = binascii.unhexlify(json_obj["s"]) | |
| iv = binascii.unhexlify(json_obj["iv"]) | |
| ct = base64.b64decode(json_obj["ct"]) | |
| # Concatenate the passphrase and the salt | |
| concated_passphrase = passphrase.encode() + salt | |
| # Compute the MD5 hashes | |
| md5 = [hashlib.md5(concated_passphrase).digest()] | |
| result = md5[0] | |
| i = 1 | |
| while len(result) < 32: | |
| md5.append(hashlib.md5(md5[i - 1] + concated_passphrase).digest()) | |
| result += md5[i] | |
| i += 1 | |
| # Extract the key from the result | |
| key = result[:32] | |
| # Decrypt the ciphertext using AES-256-CBC | |
| aes = pyaes.AESModeOfOperationCBC(key, iv) | |
| decrypter = pyaes.Decrypter(aes) | |
| plain_text = decrypter.feed(ct) | |
| plain_text += decrypter.feed() | |
| # Return the decrypted data as a JSON object | |
| return json.loads(plain_text.decode()) | |
| else: | |
| return None | |
| sUrl = item['url'] | |
| oRequest = cRequestHandler(sUrl, caching=False, ignoreErrors=True) | |
| oRequest.addHeaderEntry('Referer', 'https://kinoger.com/') | |
| sHtmlContent = oRequest.request() | |
| decryptHtmlContent = content_decryptor(sHtmlContent, 'H&5+Tx_nQcdK{U,.') # Decrypt Content | |
| isMatch, hUrl = cParser.parseSingleResult(decryptHtmlContent, 'sources.*?file.*?(http[^"]+)') | |
| if isMatch: | |
| hUrl = hUrl.replace('\\', '') | |
| oRequest = cRequestHandler(hUrl, caching=False, ignoreErrors=True) | |
| oRequest.addHeaderEntry('Referer', 'https://kinoger.ru/') | |
| oRequest.addHeaderEntry('Origin', 'https://kinoger.ru') | |
| oRequest.removeNewLines(False) | |
| sHtmlContent = oRequest.request() | |
| pattern = r'RESOLUTION=\d+x(\d+).*?\n([^#"]+)' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: continue | |
| for sQualy, sUrl in aResult: | |
| sUrl = (hUrl.split('video')[0].strip() + sUrl.strip()) | |
| sUrl = sUrl + '|Origin=https%3A%2F%2Fkinoger.ru&Referer=https%3A%2F%2Fkinoger.ru%2F' + headers | |
| #hoster = {'link': sUrl, 'name': 'Kinoger.ru ' + sQualy, 'resolveable': True} | |
| sources.append({'source': item['source'], 'quality': sQualy + 'p', 'language': 'de', 'url': sUrl, 'direct': item['direct']}) | |
| elif 'kinoger.be' in item['source']: # One Piece Film: Red / Elemental (2023) / Thanksgiving 2023 | |
| url = item['url'] | |
| # url = url.replace('kinoger.be', 'streamhide.to') | |
| # sources.append({'source': item['source'], 'quality': item['quality'], 'language': 'de', 'url': url, 'direct': False}) | |
| oRequest = cRequestHandler(url, ignoreErrors=True) | |
| oRequest.addHeaderEntry('Referer', 'https://kinoger.com/') | |
| sHtmlContent = oRequest.request() | |
| sHtmlContent += cUtil.get_packed_data(sHtmlContent) | |
| pattern = r'''sources:\s*\[{file:\s*["'](?P<url>[^"']+)''' | |
| isMatch, sUrl = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if not isMatch: continue | |
| sMaster_m3u8 = cRequestHandler(sUrl).request() | |
| pattern = 'RESOLUTION=([^,]+).*?(index.*?.m3u8)' | |
| isMatch, aResult = cParser.parse(sMaster_m3u8, pattern) | |
| if not isMatch: continue | |
| for sQualy, sIndex in aResult: | |
| sQualy = self._quality(sQualy) | |
| hUrl = sUrl.replace('master.m3u8',sIndex ) | |
| sources.append({'source': item['source'], 'quality': sQualy, 'language': 'de', 'url': hUrl, 'direct': item['direct'], 'prioHoster': 50}) | |
| else: | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(item['url'], isResolve=True) | |
| if isBlocked: continue | |
| sources.append({'source': item['source'], 'quality': item['quality'], 'language': 'de','url': url, 'direct': item['direct'], 'prioHoster': item['prioHoster']}) | |
| except: | |
| continue | |
| if len(sources) == 0: | |
| log_utils.log('Kinoger: kein Provider - %s ' % titles[0], log_utils.LOGINFO) | |
| else: | |
| for source in sources: | |
| if source not in self.sources: self.sources.append(source) | |
| return self.sources | |
| except: | |
| return sources | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return | |
| def _quality(self, q): # Kinoger.be quality; q is 'WIDTHxHEIGHT' from the m3u8 RESOLUTION tag | |
| width, height = q.split('x') | |
| width, height = int(width), int(height) | |
| if width >= 1920: return '1080p' | |
| elif height >= 720 or width >= 1080: return '720p' | |
| else: return 'SD' |
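The key derivation inside content_decryptor() is OpenSSL's legacy EVP_BytesToKey with MD5, which is what CryptoJS uses for its {"ct", "iv", "s"} AES JSON blobs. A standalone sketch of just that derivation (the passphrase is the one used above; the sample salt is made up):
| import hashlib | |
| def evp_bytes_to_key(passphrase, salt, key_len=32): | |
|     data = passphrase + salt | |
|     block = hashlib.md5(data).digest() | |
|     result = block | |
|     while len(result) < key_len: | |
|         block = hashlib.md5(block + data).digest() | |
|         result += block | |
|     return result[:key_len] | |
| key = evp_bytes_to_key(b'H&5+Tx_nQcdK{U,.', b'\x00' * 8) | |
| assert len(key) == 32 | |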
| # kinokiste | |
| # 2023-01-27 | |
| # edit 2024-12-14 | |
| from resources.lib.utils import isBlockedHoster | |
| from scrapers.modules.tools import cParser | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import cleantitle | |
| from resources.lib.control import getSetting, setSetting, urljoin | |
| SITE_IDENTIFIER = 'kinokiste' | |
| SITE_DOMAIN = 'kkiste-io.top' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| # self.search_link = self.base_link + '/index.php?do=search&subaction=search&titleonly=3&story=%s' | |
| self.search_link = self.base_link + '/?do=search&subaction=search&titleonly=3&story=%s' | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| t = set([cleantitle.get(i) for i in set(titles) if i]) | |
| #years = (year, year+1, year-1, 0) | |
| links = [] | |
| for sSearchText in titles: | |
| try: | |
| oRequest = cRequestHandler(self.search_link % sSearchText) | |
| # oRequest.addParameters('do', 'search') | |
| # oRequest.addParameters('subaction', 'search') | |
| # oRequest.addParameters('story', sSearchText) | |
| # oRequest.addParameters('titleonly', '3') | |
| sHtmlContent = oRequest.request() | |
| pattern = r'class="new_movie\d+">\s*<a\s+href="([^"]+)">[^<]*</a>.*?alt="([^"]+)".*?class="fl-quality[^"]+">([^<]+)' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: continue | |
| for i in aResult: | |
| if season == 0: | |
| if cleantitle.get(i[1]) in t: # sName | |
| if i not in links: links.append(i) | |
| else: | |
| if cleantitle.get(i[1].split('-')[0].strip()) in t and str(season) in i[1].split('-')[1]: | |
| if i not in links: links.append(i) | |
| if len(links) > 0: break | |
| except: | |
| continue | |
| if len(links) == 0: return sources | |
| elif len(links) >= 1: | |
| for link in links: | |
| self.getStreams(link, year, season, episode, hostDict) | |
| return self.sources | |
| def getStreams(self, data, year, season, episode, hostDict): | |
| sHtmlContent = cRequestHandler(data[0]).request() | |
| isMatch, aYear = cParser.parse(sHtmlContent, r'l-year">(\d+)') | |
| if not int(aYear[0]) == year and season == 0: return | |
| if season == 0: pattern = r'<a\s+href="#"\s+data-link="([^"]+)' | |
| else: pattern = r'<a\s+href="#"\s+id="[^"]+_%s"\s+data-link="([^"]+)' % episode | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: return | |
| for i in aResult: | |
| sUrl = i | |
| if sUrl.startswith('/'): sUrl = urljoin('https:', sUrl) | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': data[2], 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return |
| # 2022-01-09 | |
| # edit 2025-06-17 | |
| from resources.lib.utils import isBlockedHoster | |
| import json | |
| import re | |
| import time | |
| import requests | |
| from scrapers.modules.tools import cParser # re - alternative | |
| from resources.lib.requestHandler import cRequestHandler | |
| from resources.lib.control import urlparse, quote_plus, urljoin, parse_qs, getSetting, setSetting | |
| from scrapers.modules import cleantitle, dom_parser, source_utils | |
| # kinox | |
| SITE_IDENTIFIER = 'kinox' | |
| SITE_DOMAIN = 'kinoz.to' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domains, self.base_link = self.getdomain() | |
| #self.base_link = self._base_link | |
| self.search_link = self.base_link +'/Search.html?q=%s' | |
| self.get_links_epi = '/aGET/MirrorByEpisode/?Addr=%s&SeriesID=%s&Season=%s&Episode=%s' | |
| self.mirror_link = '/aGET/Mirror/%s&Hoster=%s&Mirror=%s' | |
| self.checkHoster = getSetting('provider.kinox.checkHoster') != 'false' | |
| self.sources = [] | |
| def getdomain(self, check=False): | |
| if getSetting('kinox.base_link') and check == False: return [getSetting('kinox.domain')], getSetting('kinox.base_link') | |
| domains = ['kinox.PUB', 'kinox.FUN', 'kinox.CLICK', 'kinox.AM', 'kinoS.TO', 'kinox.DIGITAL', 'KinoX.to', 'kinos.to', 'kinox.EXPRESS', | |
| 'kinox.SG', 'kinox.sh', 'kinox.GRATIS', 'kinox.WTF', 'kinox.tv', 'kinox.BZ', 'kinox.MOBI', 'kinox.TV', 'kinox.to', 'www12.kinos.to', | |
| 'kinox.LOL', 'kinox.FYI', 'kinox.CLOUD', 'kinox.DIRECT', 'kinox.SH', 'kinox.CLUB', 'kinoz.TO', 'ww8.kinox.to'] | |
| for i in range(18, 22): | |
| domain = 'www%s.kinoz.to' % i | |
| domains.insert(0, domain) | |
| for domain in domains: | |
| try: | |
| url = 'http://%s' % domain | |
| resp = requests.get(url) | |
| url = resp.url | |
| if resp.status_code == 200: | |
| r = dom_parser.parse_dom(resp.text, 'meta', attrs={'name': 'keywords'}, req='content') | |
| if r and 'kinox.to' in r[0].attrs.get('content').lower(): | |
| setSetting('kinox.domain', urlparse(url).netloc) | |
| setSetting('kinox.base_link', url[:-1]) | |
| if check: | |
| self.domains = [urlparse(url).netloc] | |
| self.base_link = url[:-1] | |
| return self.domains, self.base_link | |
| return [urlparse(url).netloc], url[:-1] | |
| except: | |
| pass | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| url = '' | |
| t = [cleantitle.get(i) for i in set(titles) if i] | |
| for title in titles: | |
| try: | |
| query = self.search_link % (quote_plus(title)) | |
| oRequest = cRequestHandler(query) | |
| sHtmlContent = oRequest.request() | |
| if not sHtmlContent: | |
| self.getdomain(True) | |
| query = self.search_link % (quote_plus(title)) | |
| sHtmlContent = cRequestHandler(query).request() | |
| r = dom_parser.parse_dom(sHtmlContent, 'table', attrs={'id': 'RsltTableStatic'}) | |
| r = dom_parser.parse_dom(r, 'tr') | |
| r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'alt': 'language'}, req='src'), dom_parser.parse_dom(i, 'span')) for i in r] | |
| r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].attrs['src'], i[2][0].content) for i in r if i[0] and i[1]] | |
| if season: | |
| r = [(i[0], i[1], re.findall(r'.+?(\d+)\.', i[2]), i[3]) for i in r] | |
| if r == []: continue | |
| else: | |
| r = [(i[0], i[1], re.findall(r'.+?(\d+)\.', i[2]), i[3]) for i in r if i[3] == str(year)] | |
| if r == []: continue | |
| r = [(i[0], i[1], i[2][0] if len(i[2]) > 0 else '0', i[3]) for i in r] | |
| r = sorted(r, key=lambda i: int(i[2])) # german > german/subbed | |
| r = [i[0] for i in r if i[2] in ['1', '15'] and cleantitle.get(i[1]) in t] | |
| if len(r) == 0: | |
| continue | |
| else: | |
| url = urljoin(self.base_link,r[0]) | |
| break | |
| except: | |
| pass | |
| try: | |
| if not url: | |
| return self.sources | |
| oRequest = cRequestHandler(url) | |
| sHtmlContent = oRequest.request() | |
| if season and episode: | |
| r = dom_parser.parse_dom(sHtmlContent, 'select', attrs={'id': 'SeasonSelection'}, req='rel')[0] | |
| r = source_utils.replaceHTMLCodes(r.attrs['rel'])[1:] | |
| r = parse_qs(r) | |
| r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r]) | |
| r = urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode)) | |
| oRequest = cRequestHandler(r) | |
| sHtmlContent = oRequest.request() | |
| r = dom_parser.parse_dom(sHtmlContent, 'ul', attrs={'id': 'HosterList'})[0] | |
| r = dom_parser.parse_dom(r, 'li', attrs={'id': re.compile(r'Hoster_\d+')}, req='rel') | |
| r = [(source_utils.replaceHTMLCodes(i.attrs['rel']), i.content) for i in r if i[0] and i[1]] | |
| r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r] | |
| r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2]) for i in r if len(i[1]) > 0] | |
| for link, hoster, mirrors in r: | |
| try: | |
| u = parse_qs('&id=%s' % link) | |
| u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u]) | |
| for x in range(0, int(mirrors)): | |
| tempLink = self.mirror_link % (u['id'], u['Hoster'], x + 1) | |
| if season and episode: tempLink += "&Season=%s&Episode=%s" % (season, episode) | |
| url = urljoin(self.base_link, tempLink) | |
| oRequest = cRequestHandler(url) | |
| sHtmlContent = oRequest.request() | |
| if len(sHtmlContent) < 20: | |
| time.sleep(1) # no idea - delay the request! Workaround - request twice - that works | |
| oRequest = cRequestHandler(url) | |
| sHtmlContent = oRequest.request() | |
| r = json.loads(sHtmlContent)['Stream'] | |
| r = [(dom_parser.parse_dom(r, 'a', req='href'), dom_parser.parse_dom(r, 'iframe', req='src'))] | |
| r = [i[0][0].attrs['href'] if i[0] else i[1][0].attrs['src'] for i in r if i[0] or i[1]][0] | |
| if not r.startswith('http'): r = urljoin('https:', r) | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(r) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster, 'info': 'Mirror ' + str(x+1)}) | |
| except: | |
| pass | |
| return self.sources | |
| except: | |
| return self.sources | |
| def resolve(self, url): | |
| return url | |
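The SeasonSelection handling in run() treats the element's rel attribute as a query string and flattens parse_qs's list values back to plain strings. A standalone sketch with a made-up rel value:
| from urllib.parse import parse_qs | |
| rel = 'Addr=/Stream/Some-Show.html&SeriesID=123' | |
| params = parse_qs(rel) | |
| params = {k: v[0] if v else '' for k, v in params.items()} | |
| assert params['SeriesID'] == '123' | |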
| # kkiste | |
| # 2023-01-28 | |
| # edit 2024-12-14 | |
| from resources.lib.utils import isBlockedHoster | |
| import re | |
| from scrapers.modules.tools import cParser # re - alternative | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import cleantitle, dom_parser | |
| from resources.lib.control import getSetting, setSetting, urljoin | |
| SITE_IDENTIFIER = 'kkiste' | |
| SITE_DOMAIN = 'kkiste-to.com' # kkiste.pro | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| # self.search_link = urljoin(self.base_link, '/index.php?do=search')# /index.php?do=search&subaction=search&story= | |
| self.search_link = urljoin(self.base_link, '/index.php?do=search&subaction=search&titleonly=3&story=%s') | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| try: | |
| t = set([cleantitle.get(i) for i in set(titles) if i]) | |
| years = (year, year+1, year-1, 0) | |
| links = [] | |
| for sSearchText in titles: | |
| try: | |
| oRequest = cRequestHandler(self.search_link % sSearchText) | |
| sHtmlContent = oRequest.request() | |
| pattern = r'class="short">.*?href="([^"]+)">([^<]+).*?Jahr:.*?([\d]+)<' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: | |
| continue | |
| for sUrl, sName, sYear in aResult: | |
| if season == 0: | |
| if cleantitle.get(sName) in t and int(sYear) in years: | |
| if sUrl not in links: links.append(sUrl) | |
| else: | |
| if cleantitle.get(sName.split('-')[0].strip()) in t and str(season) in sName.split('-')[1]: | |
| if sUrl not in links: links.append(sUrl) | |
| if len(links) > 0: break | |
| except: | |
| continue | |
| if len(links) == 0: return sources | |
| for link in set(links): | |
| sHtmlContent = cRequestHandler(link).request() | |
| if season > 0: | |
| pattern = r'\s%s<.*?</ul>' % episode | |
| isMatch, sHtmlContent = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if not isMatch: return sources | |
| isMatch, aResult = cParser().parse(sHtmlContent, 'link="([^"]+)">') | |
| if not isMatch: return sources | |
| for sUrl in aResult: | |
| if 'youtube' in sUrl or 'vod' in sUrl: continue | |
| if sUrl.startswith('/'): sUrl = re.sub('//', 'https://', sUrl) | |
| if sUrl.startswith('/'): sUrl = urljoin(self.base_link, sUrl) | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': 'HD', 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| return self.sources | |
| except: | |
| return self.sources | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return |
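The link normalisation above leans on urljoin(), which resolves both protocol-relative ('//host/...') and site-relative ('/path') hrefs; example.org is a placeholder base:
| from urllib.parse import urljoin | |
| assert urljoin('https:', '//host.tld/e/abc') == 'https://host.tld/e/abc' | |
| assert urljoin('https://example.org', '/e/abc') == 'https://example.org/e/abc' | |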
| #2021-01-26 | |
| #edit 2023-02-24 | |
| import time | |
| import inspect | |
| import cProfile | |
| try: | |
| from StringIO import StringIO | |
| from xbmc import LOGNOTICE as LOGINFO | |
| except: | |
| from io import StringIO | |
| from xbmc import LOGINFO | |
| try: | |
| basestring | |
| except NameError: | |
| basestring = str | |
| import pstats | |
| import json | |
| import xbmc | |
| from xbmc import LOGDEBUG, LOGERROR, LOGFATAL, LOGWARNING | |
| from resources.lib import control | |
| name = control.addonInfo('name') | |
| DEBUGPREFIX = '[ '+name+' DEBUG ]' | |
| def log(msg, level=LOGDEBUG, caller=None): | |
| #if control.getSetting('addon_debug') == 'true' and level == LOGDEBUG: | |
| #level = LOGNOTICE | |
| try: | |
| if caller is not None and level == LOGDEBUG: | |
| func = inspect.currentframe().f_back.f_code | |
| line_number = inspect.currentframe().f_back.f_lineno | |
| caller = "%s.%s()" % (caller, func.co_name) | |
| msg = 'From func name: %s Line # :%s\n msg : %s' % (caller, line_number, msg) | |
| if caller is not None and level == LOGERROR: | |
| msg = 'From func name: %s.%s() Line # :%s\n msg : %s'%(caller[0], caller[1], caller[2], msg) | |
| #TODO | |
| # import pydevd | |
| # pydevd.settrace('localhost', port=12345, stdoutToServer=True, stderrToServer=True) | |
| # if isinstance(msg, type(u"")): | |
| # msg = '%s (ENCODED)' % (msg.encode('utf-8')) | |
| if not isinstance(msg, str): | |
| msg = str(msg) | |
| xbmc.log('%s: %s' % (DEBUGPREFIX, msg), level) | |
| except Exception as e: | |
| try: | |
| xbmc.log('Logging Failure: %s' % (e), level) | |
| except: | |
| pass # just give up | |
| def error(message=None, exception=True): | |
| try: | |
| import sys | |
| if exception: | |
| type, value, traceback = sys.exc_info() | |
| sysaddon = sys.argv[0].split('//')[1].replace('/', '.') | |
| filename = (traceback.tb_frame.f_code.co_filename).replace('\\', '.').replace('.py', '') | |
| filename = filename.split(sysaddon)[1].replace('\\', '.') | |
| name = traceback.tb_frame.f_code.co_name | |
| linenumber = traceback.tb_lineno | |
| errortype = type.__name__ | |
| errormessage = str(value) | |
| if errormessage == '': | |
| raise Exception() | |
| if message: | |
| message += ' -> ' | |
| else: | |
| message = '' | |
| message += str(errortype) + ' -> ' + str(errormessage) | |
| caller = [filename, name, linenumber] | |
| else: | |
| caller = None | |
| log(msg=message, level=LOGERROR, caller=caller) | |
| except: | |
| pass | |
| class Profiler(object): | |
| def __init__(self, file_path, sort_by='time', builtins=False): | |
| self._profiler = cProfile.Profile(builtins=builtins) | |
| self.file_path = file_path | |
| self.sort_by = sort_by | |
| def profile(self, f): | |
| def method_profile_on(*args, **kwargs): | |
| try: | |
| self._profiler.enable() | |
| result = self._profiler.runcall(f, *args, **kwargs) | |
| self._profiler.disable() | |
| return result | |
| except Exception as e: | |
| log('Profiler Error: %s' % (e), LOGWARNING) | |
| return f(*args, **kwargs) | |
| def method_profile_off(*args, **kwargs): | |
| return f(*args, **kwargs) | |
| if _is_debugging(): | |
| return method_profile_on | |
| else: | |
| return method_profile_off | |
| def __del__(self): | |
| self.dump_stats() | |
| def dump_stats(self): | |
| if self._profiler is not None: | |
| s = StringIO() | |
| params = (self.sort_by,) if isinstance(self.sort_by, basestring) else self.sort_by | |
| ps = pstats.Stats(self._profiler, stream=s).sort_stats(*params) | |
| ps.print_stats() | |
| if self.file_path is not None: | |
| with open(self.file_path, 'w') as f: | |
| f.write(s.getvalue()) | |
| def trace(method): | |
| def method_trace_on(*args, **kwargs): | |
| start = time.time() | |
| result = method(*args, **kwargs) | |
| end = time.time() | |
| log('{name!r} time: {time:2.4f}s args: |{args!r}| kwargs: |{kwargs!r}|'.format(name=method.__name__, time=end - start, args=args, kwargs=kwargs), LOGDEBUG) | |
| return result | |
| def method_trace_off(*args, **kwargs): | |
| return method(*args, **kwargs) | |
| if _is_debugging(): | |
| return method_trace_on | |
| else: | |
| return method_trace_off | |
| def _is_debugging(): | |
| command = {'jsonrpc': '2.0', 'id': 1, 'method': 'Settings.getSettings', 'params': {'filter': {'section': 'system', 'category': 'logging'}}} | |
| js_data = execute_jsonrpc(command) | |
| for item in js_data.get('result', {}).get('settings', {}): | |
| if item['id'] == 'debug.showloginfo': | |
| return item['value'] | |
| return False | |
| def execute_jsonrpc(command): | |
| if not isinstance(command, basestring): | |
| command = json.dumps(command) | |
| response = control.jsonrpc(command) | |
| return json.loads(response) |
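The trace() decorator above is a plain timing wrapper gated on Kodi's debug setting. A standalone sketch of the same pattern without the Kodi dependencies:
| import time | |
| def trace(method): | |
|     def wrapper(*args, **kwargs): | |
|         start = time.time() | |
|         result = method(*args, **kwargs) | |
|         print('%r took %.4fs' % (method.__name__, time.time() - start)) | |
|         return result | |
|     return wrapper | |
| @trace | |
| def slow_add(a, b): | |
|     time.sleep(0.1) | |
|     return a + b | |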
| # megakino | |
| # 2022-07-19 | |
| # edit 2024-12-14 | |
| from resources.lib.utils import isBlockedHoster | |
| import re | |
| from scrapers.modules.tools import cParser # re - alternative | |
| from resources.lib.requestHandler import cRequestHandler | |
| from scrapers.modules import cleantitle, dom_parser | |
| from resources.lib.control import getSetting, quote, quote_plus, urljoin | |
| SITE_IDENTIFIER = 'megakino' | |
| SITE_DOMAIN = 'megakino.live' | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb='', hostDict=None): | |
| sources = [] | |
| try: | |
| t = set([cleantitle.get(i) for i in set(titles) if i]) | |
| #years = (year, year+1, year-1, 0) | |
| links = [] | |
| for sSearchText in titles: | |
| try: | |
| # url = self.search_link % (sSearchText) | |
| oRequest = cRequestHandler(self.base_link) | |
| oRequest.addParameters('do', 'search') | |
| oRequest.addParameters('subaction', 'search') | |
| #oRequest.addParameters('search_start', '0') | |
| #oRequest.addParameters('full_search', '0') | |
| #oRequest.addParameters('result_from', '1') | |
| oRequest.addParameters('story', quote_plus(sSearchText)) | |
| # oRequest.addParameters('titleonly', '3') | |
| sHtmlContent = oRequest.request() | |
| r = dom_parser.parse_dom(sHtmlContent, 'div', attrs={'id': 'dle-content'})[0].content | |
| #a = dom_parser.parse_dom(r, 'a') | |
| #pattern = '<a\s+class=[^>]*href="([^"]+)">.*?alt="([^"]+)">\s*<div\s+class="poster__label">([^<]+).*?<li>.*?(\d{4}).*?</a>' | |
| if season != 0: pattern = r'<a\s+class="poster[^>]*href="([^"]+).*?alt="([^"]+)' | |
| else: pattern = r'<a\s+class="poster[^>]*href="([^"]+).*?alt="([^"]+)">.*?<li>.*?(\d{4}).*?</a>' | |
| isMatch, aResult = cParser.parse(r, pattern) | |
| if not isMatch: continue | |
| if season == 0: | |
| for sUrl, sName, sYear in aResult: # sUrl, sName, sQuality, sYear | |
| if not int(sYear) == year: continue | |
| #if '1080' in sQuality: sQuality = '1080p' | |
| if cleantitle.get(sName) in t: | |
| links.append({'url': sUrl, 'name': sName, 'quality': 'HD', 'year': sYear}) | |
| elif season > 0: | |
| for sUrl, sName in aResult: | |
| sYear = '' | |
| if cleantitle.get(sName.split('- S')[0].strip()) in t and str(season) in sName.split('- S')[1]: | |
| links.append({'url': sUrl, 'name': sName.split('- S')[0].strip(), 'quality': 'HD', 'year': sYear}) | |
| if len(links) == 0 and season == 0: | |
| for sUrl, sName, sYear in aResult: | |
| if not int(sYear) == year: continue | |
| #if '1080' in sQuality: sQuality = '1080p' | |
| for a in t: | |
| if any([a in cleantitle.get(sName)]): | |
| links.append({'url': sUrl, 'name': sName, 'quality': 'HD', 'year': sYear}) | |
| break | |
| if len(links) > 0: break | |
| except: | |
| continue | |
| if len(links) == 0: return sources | |
| for link in links: | |
| sHtmlContent = cRequestHandler(link['url']).request() | |
| if season > 0: | |
| self.quality = link['quality'] | |
| pattern = r'<select\s+name="pmovie__select-items"\s+class="[^"]+"\s+style="[^"]+"\s+id="ep%s">\s*(.*?)\s*</select>' % str(episode) | |
| isMatch, sHtmlContent = cParser.parseSingleResult(sHtmlContent, pattern) | |
| isMatch, aResult = cParser().parse(sHtmlContent, 'value="([^"]+)') | |
| if not isMatch: return sources | |
| else: | |
| pattern = 'poster__label">([^/|<]+)' | |
| isMatch, sQuality = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if '1080' in sQuality: sQuality = '1080p' | |
| quality = sQuality if isMatch else link['quality'] | |
| # pattern = '<iframe\s+id="film_main"\s+data-src="([^"]+)"' | |
| pattern = r'<iframe.*?src=(?:"|)([^"|\s]+)' | |
| isMatch, aResult = cParser().parse(sHtmlContent, pattern) | |
| if not isMatch: return sources | |
| for sUrl in aResult: | |
| if sUrl.startswith('/'): sUrl = re.sub('//', 'https://', sUrl) | |
| if sUrl.startswith('/'): sUrl = urljoin(self.base_link, sUrl) | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: self.sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| return self.sources | |
| except: | |
| return self.sources | |
| def resolve(self, url): | |
| try: | |
| return url | |
| except: | |
| return |
| #2021-11-22 | |
| #edit 2025-03-02 | |
| import sys, re | |
| import hashlib, os, codecs | |
| from sqlite3 import dbapi2 as database | |
| import xbmc, xbmcplugin | |
| from resources.lib.control import py2_encode, translatePath, executebuiltin | |
| from resources.lib import log_utils, control, playcountDB | |
| try: | |
| import xmlrpclib as _xmlrpclib | |
| from StringIO import StringIO as _io | |
| except: | |
| import xmlrpc.client as _xmlrpclib | |
| from io import BytesIO as _io | |
| # possibly for later use as meta | |
| #_params = dict(parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict() | |
| class player(xbmc.Player): | |
| def __init__(self, *args, **kwargs): | |
| xbmc.Player.__init__(self, *args, **kwargs) | |
| self.streamFinished = False | |
| self.totalTime = 0 | |
| self.currentTime = 0 | |
| self.playcount = 0 | |
| self.watcher_control = False | |
| self.isdebug = control.getSetting('status.debug') == 'true' | |
| def run(self, title, url, meta): | |
| import xbmc | |
| try: | |
| self.meta = meta | |
| self.mediatype = meta['mediatype'] | |
| self.title = meta['title'] | |
| self.year = str(meta['year']) if 'year' in meta else '' | |
| if meta['mediatype'] == 'movie': | |
| self.name = title + ' (%s)' % meta['year'] if meta.get('year', False) else title | |
| else: | |
| self.name = title + ' S%02dE%02d' % (int(meta['season']), int(meta['episode'])) | |
| if control.is_python2 and type(self.name) != unicode: | |
| self.name = self.name.decode('utf-8') | |
| self.imdb = meta['imdb_id'] if 'imdb_id' in meta else None | |
| self.number_of_seasons = meta['number_of_seasons'] if 'number_of_seasons' in meta else None | |
| self.season = meta['season'] if 'season' in meta else None | |
| self.number_of_episodes = meta['number_of_episodes'] if 'number_of_episodes' in meta else None | |
| self.episode = meta['episode'] if 'episode' in meta else None | |
| self.playcount = meta['playcount'] if 'playcount' in meta else 0 | |
| self.offset = bookmarks().get(self.name, self.year) | |
| from glob import glob | |
| os.chdir(os.path.join(control.translatePath('special://database/'))) | |
| self.videoDB = os.path.join(control.translatePath('special://database/'), sorted(glob("MyVideos*.db"), reverse=True)[0]) | |
| self.fileID = self.getVideoDB() | |
| plot = control.unquote(meta['plot']) if 'plot' in meta else '' | |
| Info = {'plot': plot} | |
| Info.setdefault('IMDBNumber', meta.get('imdbnumber', '')) | |
| if meta['mediatype'] == 'movie': | |
| Info.setdefault('OriginalTitle', meta['title']) | |
| Info.setdefault('year', meta['year']) | |
| else: | |
| Info.setdefault('TVshowtitle', meta['title']) | |
| Info.setdefault('Season', self.season) | |
| Info.setdefault('Episode', self.episode) | |
| item = control.item(label=self.name) | |
| # TS: video/mp2t | |
| # HLS: application/x-mpegURL or application/vnd.apple.mpegurl | |
| # Dash: application/dash+xml | |
| kodiver = int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0]) | |
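| # background note: inputstream.adaptive 21 (Kodi 21 'Omega') auto-detects the manifest and | |
| # no longer accepts the 'manifest_type' property, hence the kodiver checks below | |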
| if ".m3u" in url or '.mpd' in url: | |
| item.setProperty("inputstream", "inputstream.adaptive") | |
| if '.mpd' in url: | |
| if kodiver < 21: item.setProperty('inputstream.adaptive.manifest_type', 'mpd') | |
| item.setMimeType('application/dash+xml') | |
| else: | |
| if kodiver < 21: item.setProperty('inputstream.adaptive.manifest_type', 'hls') | |
| # item.setMimeType("application/vnd.apple.mpegurl") | |
| item.setMimeType('application/x-mpegURL') | |
| item.setContentLookup(False) | |
| if '|' in url: | |
| stream_url, strhdr = url.split('|') | |
| item.setProperty('inputstream.adaptive.stream_headers', strhdr) | |
| if kodiver > 19: item.setProperty('inputstream.adaptive.manifest_headers', strhdr) | |
| #item.setPath(stream_url) | |
| url = stream_url | |
| item.setPath(url) | |
| try: | |
| item.setArt({'poster': meta['poster']}) | |
| item.setInfo(type='Video', infoLabels=Info) | |
| except: | |
| pass | |
| item.setProperty('IsPlayable', 'true') | |
| if int(sys.argv[1]) > 0: | |
| xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item) | |
| else: | |
| xbmc.Player().play(url, item) | |
| self.keepPlaybackAlive() | |
| return | |
| except: | |
| return | |
| def keepPlaybackAlive(self): | |
| if self.isdebug: log_utils.log('Start - keepPlaybackAlive', log_utils.LOGINFO) | |
| for i in range(0, 240): | |
| if self.isPlayingVideo(): break | |
| control.sleep(1) | |
| if self.isPlayingVideo(): | |
| try: | |
| playcountDB.createEntry(self.mediatype, self.title, self.name, self.imdb, self.number_of_seasons, self.season, self.number_of_episodes, self.episode) | |
| except: | |
| pass | |
| monitor = xbmc.Monitor() | |
| self.watcher_control = False | |
| while not monitor.abortRequested() and not self.streamFinished: | |
| if self.isPlayingVideo(): | |
| self.totalTime = self.getTotalTime() | |
| self.currentTime = self.getTime() | |
| # guard against ZeroDivisionError right after playback starts, when getTotalTime() can still return 0 | |
| watcher = self.totalTime > 0 and (self.currentTime / self.totalTime >= .9) | |
| if watcher and not self.watcher_control: | |
| playcountDB.updatePlaycount(self.mediatype, self.title, self.name, self.imdb, self.number_of_seasons, self.season, self.number_of_episodes, self.episode, 1) | |
| #control.setSetting(id='watcher.control', value='true') | |
| self.watcher_control = True | |
| monitor.waitForAbort(3) | |
| if self.isdebug: log_utils.log('Ende - keepPlaybackAlive', log_utils.LOGINFO) | |
| def idleForPlayback(self): | |
| for i in range(0, 200): | |
| if control.condVisibility('Window.IsActive(busydialog)') == 1: control.idle() | |
| else: break | |
| control.sleep(1) | |
| def onAVStarted(self): | |
| if self.isdebug: log_utils.log('Start - onAVStarted', log_utils.LOGINFO) | |
| control.execute('Dialog.Close(all,true)') | |
| if self.offset != '0': self.seekTime(float(self.offset)) | |
| self.idleForPlayback() | |
| if control.getSetting('subtitles') == 'true': | |
| subtitles().get(self.name, self.imdb, self.season, self.episode) | |
| # turn subtitles off in the player menu - they can then be switched on by hand when needed | |
| # xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Player.SetSubtitle", "params": {"playerid": 1, "subtitle" : "on"}, "id": "1"}') | |
| xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Player.SetSubtitle", "params": {"playerid": 1, "subtitle" : "off"}, "id": "1"}') | |
| if self.isdebug: log_utils.log('Ende - onAVStarted', log_utils.LOGINFO) | |
| def onPlayBackStopped(self): | |
| if self.isdebug: log_utils.log('Start - onPlayBackStopped', log_utils.LOGINFO) | |
| self.runVideoDB() | |
| self.streamFinished = True | |
| bookmarks().reset(self.currentTime, self.totalTime, self.name, self.year) | |
| if self.isdebug: log_utils.log('vor parentDir - onPlayBackStopped', log_utils.LOGINFO) | |
| if self.watcher_control: | |
| self.parentDir() | |
| self.watcher_control = False | |
| if self.isdebug: log_utils.log('Ende - onPlayBackStopped', log_utils.LOGINFO) | |
| def onPlayBackEnded(self): | |
| self.onPlayBackStopped() | |
| if self.isdebug: log_utils.log('Ende - onPlayBackEnded', log_utils.LOGINFO) | |
| def parentDir(self): | |
| refreshtime = 2 | |
| control.sleep(refreshtime) | |
| ccont = '' | |
| if control.getSetting('hosts.mode') == '1': # list of streams (hoster list) shown as a directory | |
| count = 0 | |
| # check whether the hoster list is active - its content is 'videos' | |
| for count in range(1, 25+1): | |
| control.sleep(2) | |
| ccont = control.getInfoLabel("Container.Content") | |
| if ccont == 'videos': break | |
| if self.isdebug: log_utils.log(__name__ + ' - count: %s - Container.Content (1): %s' % (count, control.getInfoLabel("Container.Content")), log_utils.LOGINFO) | |
| if count == 25: return | |
| # switch to the movie or episode list - from content 'videos' to content 'movies' | |
| if control.getInfoLabel("Container.Content") != 'movies' and ccont == 'videos': | |
| control.execute('Action(ParentDir)') | |
| for count in range(1, 15 + 1): | |
| control.sleep(2) | |
| ccont = control.getInfoLabel("Container.Content") | |
| if ccont == 'movies': break | |
| if self.isdebug: log_utils.log(__name__ + ' - count: %s - Container.Content (2): %s' % (count, control.getInfoLabel("Container.Content")), log_utils.LOGINFO) | |
| if count == 15: | |
| return | |
| else: | |
| refreshtime = 0 | |
| if self.playcount == 0: | |
| ## also depends on control.content() | |
| refresh = False | |
| if control.getSetting('status.refresh.movies') == 'true' and self.mediatype == 'movie': # always! | |
| refresh = True | |
| elif control.getSetting('status.refresh.episodes') == 'true' and self.mediatype != 'movie': | |
| if xbmc.getCondVisibility('system.platform.linux') and xbmc.getCondVisibility('system.platform.android'): refresh = True # Android | |
| elif control.getSetting('hosts.mode') == '1': refresh = True | |
| if refresh: | |
| if refreshtime != 0: control.sleep(refreshtime) | |
| control.execute('Container.Refresh') | |
| # create no bookmark/file entries in the Kodi DB (e.g. 'MyVideos116.db'), or remove them again right away | |
| def runVideoDB(self): | |
| idFile = self.getVideoDB() | |
| if idFile != self.fileID: | |
| self.removeVideoDB(idFile) | |
| def getVideoDB(self): | |
| dbcon = database.connect(self.videoDB) | |
| dbcur = dbcon.cursor() | |
| dbcur.execute("SELECT * FROM files") | |
| match = dbcur.fetchall() | |
| dbcon.close() | |
| if match and len(match) > 0: idFile = len(match) | |
| else: idFile = 0 | |
| return idFile | |
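| # note: the row count of 'files' serves as a stand-in for the newest idFile; this trick | |
| # assumes the table's ids stay contiguous, which Kodi does not guarantee | |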
| def removeVideoDB(self, idFile): | |
| dbcon = database.connect(self.videoDB) | |
| dbcur = dbcon.cursor() | |
| dbcur.execute("DELETE FROM files WHERE idFile = '%s'" % idFile) # in DB vorhandener Trigger löscht auch den bookmark | |
| dbcon.commit() | |
| dbcon.close() | |
| class subtitles: | |
| def __init__(self, *args, **kwargs): | |
| from xbmcaddon import Addon | |
| __scriptname__ = "XBMC Subtitles Login" | |
| __version__ = Addon().getAddonInfo('version') # Module version | |
| BASE_URL_XMLRPC = u"http://api.opensubtitles.org/xml-rpc" | |
| self.server = _xmlrpclib.ServerProxy(BASE_URL_XMLRPC, verbose=0) | |
| login = self.server.LogIn(Addon().getSetting('subtitles.os_user'), Addon().getSetting('subtitles.os_pass'), "en", "%s_v%s" % (__scriptname__.replace(" ", "_"), __version__)) | |
| if login["status"] == "200 OK": | |
| self.osdb_token = login["token"] | |
| def get(self, name, imdb, season, episode): | |
| season = str(season) | |
| episode = str(episode) | |
| try: | |
| langDict = {'Afrikaans': 'afr', 'Albanian': 'alb', 'Arabic': 'ara', 'Armenian': 'arm', 'Basque': 'baq', 'Bengali': 'ben', 'Bosnian': 'bos', 'Breton': 'bre', 'Bulgarian': 'bul', 'Burmese': 'bur', 'Catalan': 'cat', 'Chinese': 'chi', 'Croatian': 'hrv', 'Czech': 'cze', 'Danish': 'dan', 'Dutch': 'dut', 'English': 'eng', 'Esperanto': 'epo', 'Estonian': 'est', 'Finnish': 'fin', 'French': 'fre', 'Galician': 'glg', 'Georgian': 'geo', 'German': 'ger', 'Greek': 'ell', 'Hebrew': 'heb', 'Hindi': 'hin', 'Hungarian': 'hun', 'Icelandic': 'ice', 'Indonesian': 'ind', 'Italian': 'ita', 'Japanese': 'jpn', 'Kazakh': 'kaz', 'Khmer': 'khm', 'Korean': 'kor', 'Latvian': 'lav', 'Lithuanian': 'lit', 'Luxembourgish': 'ltz', 'Macedonian': 'mac', 'Malay': 'may', 'Malayalam': 'mal', 'Manipuri': 'mni', 'Mongolian': 'mon', 'Montenegrin': 'mne', 'Norwegian': 'nor', 'Occitan': 'oci', 'Persian': 'per', 'Polish': 'pol', 'Portuguese': 'por,pob', 'Portuguese(Brazil)': 'pob,por', 'Romanian': 'rum', 'Russian': 'rus', 'Serbian': 'scc', 'Sinhalese': 'sin', 'Slovak': 'slo', 'Slovenian': 'slv', 'Spanish': 'spa', 'Swahili': 'swa', 'Swedish': 'swe', 'Syriac': 'syr', 'Tagalog': 'tgl', 'Tamil': 'tam', 'Telugu': 'tel', 'Thai': 'tha', 'Turkish': 'tur', 'Ukrainian': 'ukr', 'Urdu': 'urd'} | |
| codePageDict = {'ara': 'cp1256', 'ar': 'cp1256', 'ell': 'cp1253', 'el': 'cp1253', 'heb': 'cp1255', 'he': 'cp1255', 'tur': 'cp1254', 'tr': 'cp1254', 'rus': 'cp1251', 'ru': 'cp1251'} | |
| # opensubtitles.org | |
| os_user = control.getSetting('subtitles.os_user') | |
| os_pass = control.getSetting('subtitles.os_pass') | |
| os_useragent = 'TemporaryUserAgent' | |
| langs = [] | |
| try: | |
| try: langs = langDict[control.getSetting('subtitles.lang.1')].split(',') | |
| except: langs.append(langDict[control.getSetting('subtitles.lang.1')]) | |
| except: pass | |
| try: | |
| try: langs = langs + langDict[control.getSetting('subtitles.lang.2')].split(',') | |
| except: langs.append(langDict[control.getSetting('subtitles.lang.2')]) | |
| except: pass | |
| try: subLang = xbmc.Player().getSubtitles() | |
| except: subLang = '' | |
| if subLang == langs[0]: raise Exception() | |
| imdbid = re.sub(r'[^0-9]', '', imdb) | |
| if season == 'None' or episode == 'None': | |
| result = self.server.SearchSubtitles(self.osdb_token, [{'sublanguageid': langs[0], 'imdbid': imdbid}])['data'] | |
| if not result: result = self.server.SearchSubtitles(self.osdb_token, [{'sublanguageid': langs[1], 'imdbid': imdbid}])['data'] | |
| else: | |
| result = self.server.SearchSubtitles(self.osdb_token, [{'sublanguageid': langs[0], 'imdbid': imdbid, 'season': season, 'episode': episode}])['data'] | |
| if not result: result = self.server.SearchSubtitles(self.osdb_token, [{'sublanguageid': langs[1], 'imdbid': imdbid, 'season': season, 'episode': episode}])['data'] | |
| # fmt = ['hdtv'] | |
| filtered = [] | |
| result = [i for i in result if i['SubSumCD'] == '1'] | |
| for userrank in ['OS Legend', 'Administrator', 'Translator', 'Platinum member', 'Gold member', 'Silver member', 'Bronze member', 'trusted', '']: | |
| for i in result: | |
| if i['UserRank'] == userrank.lower(): | |
| filtered.append(i) | |
| try: lang = xbmc.convertLanguage(filtered[0]['SubLanguageID'], xbmc.ISO_639_1) | |
| except: lang = filtered[0]['SubLanguageID'] | |
| subtitle = control.translatePath('special://temp/') | |
| subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang) | |
| ZipDownloadID = filtered[0]['ZipDownloadLink'].split('/')[-1] | |
| ZipDownloadLink = 'https://dl.opensubtitles.org/en/download/sub/%s' % ZipDownloadID | |
| import requests, zipfile | |
| r = requests.get(ZipDownloadLink) | |
| zf = zipfile.ZipFile(_io(r.content)) | |
| content = '' | |
| for name in zf.namelist(): | |
| if not name.endswith('.srt'): continue | |
| content = zf.read(name) | |
| codepage = codePageDict.get(lang, '') | |
| if codepage and control.getSetting('subtitles.utf') == 'true': | |
| try: | |
| content_encoded = codecs.decode(content, codepage) | |
| content = codecs.encode(content_encoded, 'utf-8') | |
| except: | |
| pass | |
| output = open(subtitle, 'wb') | |
| output.write(content) | |
| output.close() | |
| control.sleep(1) | |
| xbmc.Player().setSubtitles(subtitle) | |
| except: | |
| pass | |
| class bookmarks: | |
| def get(self, name, year='0'): | |
| from resources.lib import bookmarkDB | |
| offset = '0' | |
| try: | |
| if not control.getSetting('bookmarks') == 'true': raise Exception() | |
| idFile = hashlib.md5() | |
| for i in name: | |
| try: | |
| idFile.update(str(i).encode('utf-8')) | |
| except: | |
| idFile.update(str(i)) | |
| for i in year: | |
| try: | |
| idFile.update(str(i).encode('utf-8')) | |
| except: | |
| idFile.update(str(i)) | |
| idFile = str(idFile.hexdigest()) | |
| match = bookmarkDB.get_query(idFile, 'bookmarks.pcl') | |
| # dbcon = database.connect(control.bookmarksFile) | |
| # dbcur = dbcon.cursor() | |
| # dbcur.execute("CREATE TABLE IF NOT EXISTS bookmark (""idFile TEXT, ""timeInSeconds TEXT, ""UNIQUE(idFile)"");") | |
| # dbcur.execute("SELECT * FROM bookmark WHERE idFile = '%s'" % idFile) | |
| # match = dbcur.fetchone() | |
| # dbcon.commit() | |
| # dbcon.close() | |
| if match: offset = str(match[1]) | |
| if offset == '0': raise Exception() | |
| minutes, seconds = divmod(float(offset), 60) | |
| hours, minutes = divmod(minutes, 60) | |
| label = '%02d:%02d:%02d' % (hours, minutes, seconds) | |
| label = py2_encode("Fortsetzen ab : %s" % label) | |
| if control.getSetting('bookmarks.auto') == 'false': | |
| try: | |
| # contextmenu() returns the selected index (or -1 when cancelled); only index 1 means 'play from start' | |
| yes = control.dialog.contextmenu([label, "Vom Anfang abspielen"]) == 1 | |
| except: | |
| yes = control.yesnoDialog(label, '', '', str(name), "Fortsetzen", "Vom Anfang abspielen") | |
| if yes: | |
| bookmarkDB.remove_query(idFile, 'bookmarks') | |
| offset = '0' | |
| return offset | |
| except: | |
| return offset | |
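| # For reference: the character-by-character md5 updates above are equivalent to hashing | |
| # the concatenated strings in one go (assuming name and year are str): | |
| #   idFile = hashlib.md5((name + year).encode('utf-8')).hexdigest() | |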
| def reset(self, currentTime, totalTime, name, year='0'): | |
| from resources.lib import bookmarkDB | |
| try: | |
| #if not control.getSetting('bookmarks') == 'true': raise Exception() | |
| if control.getSetting('bookmarks') == 'true' and int(currentTime) > 180: | |
| timeInSeconds = str(currentTime) | |
| idFile = hashlib.md5() | |
| for i in name: | |
| try: | |
| idFile.update(str(i).encode('utf-8')) | |
| except: | |
| idFile.update(str(i)) | |
| for i in year: | |
| try: | |
| idFile.update(str(i).encode('utf-8')) | |
| except: | |
| idFile.update(str(i)) | |
| idFile = str(idFile.hexdigest()) | |
| if (currentTime / totalTime) >= .92: | |
| bookmarkDB.remove_query(idFile, 'bookmarks') | |
| else: | |
| bookmarkDB.save_query(idFile, timeInSeconds, 'bookmarks') | |
| # dbcon = database.connect(control.bookmarksFile) | |
| # dbcur = dbcon.cursor() | |
| # dbcur.execute("CREATE TABLE IF NOT EXISTS bookmark (""idFile TEXT, ""timeInSeconds TEXT, ""UNIQUE(idFile)"");") | |
| # if (currentTime / totalTime) <= .92: | |
| # dbcur.execute("DELETE FROM bookmark WHERE idFile = '%s'" % idFile) | |
| # dbcur.execute("INSERT INTO bookmark Values (?, ?)", (idFile, timeInSeconds)) | |
| # else: | |
| # dbcur.execute("DELETE FROM bookmark WHERE idFile = '%s'" % idFile) | |
| # dbcon.commit() | |
| # dbcon.close() | |
| except: | |
| pass |
| # Python 3 | |
| # edit 2025-06-12 | |
| """ | |
| RH - edit by kasi - xstream -> xship | |
| 2025-04-08 | |
| - move cCache() from tools to RH | |
| - edit __cleanupUrl() | |
| - edit request() read cache and save cache | |
| -- kasi -- | |
| After a Kodi restart the RAM cache is empty. | |
| If a file cache exists, use it and promote it to the RAM cache. | |
| """ | |
| import time | |
| import xbmcgui | |
| import re | |
| import os | |
| import hashlib | |
| import json | |
| import traceback | |
| import ssl | |
| import certifi | |
| import socket | |
| import zlib | |
| import http.client | |
| from resources.lib.control import dataPath, addonName, getSetting, quote_plus | |
| try: | |
| import xbmcgui, xbmc, xbmcaddon | |
| ADDON_NAME = addonName | |
| profilePath = dataPath | |
| except ImportError: | |
| ADDON_NAME = '' | |
| profilePath = '' | |
| _getSetting = xbmcaddon.Addon().getSetting | |
| def getSetting(Name, default=''): | |
| result = _getSetting(Name) | |
| if result: | |
| return result | |
| else: | |
| return default | |
| from resources.lib.tools import logger | |
| from urllib.parse import quote, urlencode, urlparse | |
| from urllib.error import HTTPError, URLError | |
| from urllib.request import HTTPHandler, HTTPSHandler, Request, HTTPCookieProcessor, build_opener, urlopen, HTTPRedirectHandler | |
| from http.cookiejar import LWPCookieJar, Cookie | |
| from http.client import HTTPException | |
| from random import choice | |
| class IPHTTPSConnection(http.client.HTTPSConnection): | |
| def __init__(self, host, ip=None, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None): | |
| self.context = context | |
| # If an IP is provided, connect to it rather than the resolved host. | |
| self.ip = ip | |
| self.actual_host = host # original hostname for SNI and Host header | |
| super().__init__(host if not ip else ip, port, timeout=timeout, context=context) | |
| def connect(self): | |
| # Create a socket connection to the provided IP (if any) | |
| if self.ip: | |
| self.sock = self._create_connection((self.ip, self.port), self.timeout) | |
| #if self._tunnel_host: | |
| # self._tunnel() | |
| # Wrap the socket with our SSL context using the actual host for SNI. | |
| self.sock = self.context.wrap_socket(self.sock, server_hostname=self.actual_host) | |
| else: | |
| super().connect() | |
| class CustomSecureHTTPSHandler(HTTPSHandler): | |
| def __init__(self, ip=None): | |
| # Create an SSL context with certifi's CA bundle. | |
| context = ssl.create_default_context(cafile=certifi.where()) | |
| # If an IP is provided, disable hostname checking (since we'll verify using SNI later). | |
| context.check_hostname = False if ip else True | |
| context.verify_mode = ssl.CERT_REQUIRED | |
| self.ip = ip | |
| self.context = context | |
| super().__init__(context=context) | |
| def https_open(self, req): | |
| # Extract the hostname from the request URL. | |
| parsed = urlparse(req.full_url) | |
| host = parsed.hostname | |
| # Define a connection factory that returns an IPHTTPSConnection | |
| def connection_factory(*args, **kwargs): | |
| return IPHTTPSConnection(host, ip=self.ip, timeout=req.timeout, context=self.context) | |
| return self.do_open(connection_factory, req) | |
| class RedirectFilter(HTTPRedirectHandler): | |
| def redirect_request(self, req, fp, code, msg, hdrs, newurl): | |
| if getSetting('bypassDNSlock', 'false') != 'true': | |
| if 'notice.cuii' in newurl: | |
| xbmcgui.Dialog().ok("xShip Support Information", "Ihr Internetanbieter zensiert ihren Internetzugang!" + '\n' + "Um sich vor der Zensur zu schützen, empfehlen wir euren DNS Server im Router bzw. auf Euren Geräten auf Google oder Cloudflare umzustellen - für die Protokolle IPv4 UND IPv6! Anleitungen findet Ihr per Googlesuche z.B. 'Fritzbox DNS Server ändern'") # die neue Funktion 'DNS Sperre umgehen' in xShip zu aktivieren oder | |
| return None | |
| return HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl) | |
| class cRequestHandler: | |
| # useful for e.g. tmdb request where multiple requests are made within a loop | |
| persistent_openers = {} | |
| @staticmethod | |
| def RandomUA(): | |
| # random user agents, last updated 2025-06-08 | |
| FF_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:139.0) Gecko/20100101 Firefox/139.0' | |
| OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 OPR/119.0.0.0' | |
| ANDROID_USER_AGENT = 'Mozilla/5.0 (Linux; Android 15; SM-S931U Build/AP3A.240905.015.A2; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/132.0.6834.163 Mobile Safari/537.36' | |
| EDGE_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0' | |
| CHROME_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36' | |
| SAFARI_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 14_7_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.4 Safari/605.1.15' | |
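| # note: ANDROID_USER_AGENT is defined above but left out of the rotation below, so only desktop/mac UAs are randomized | |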
| _User_Agents = [FF_USER_AGENT, OPERA_USER_AGENT, EDGE_USER_AGENT, CHROME_USER_AGENT, SAFARI_USER_AGENT] | |
| return choice(_User_Agents) | |
| def __init__(self, sUrl, caching=True, ignoreErrors=True, compression=True, jspost=False, ssl_verify=False, bypass_dns=False): | |
| self._sUrl = self.__cleanupUrl(sUrl) | |
| self._sRealUrl = '' | |
| # self._USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0' | |
| self._USER_AGENT = self.RandomUA() | |
| self._aParameters = {} | |
| self._headerEntries = {} | |
| self._profilePath = profilePath | |
| self._cachePath = '' | |
| self._cookiePath = '' | |
| self._Status = '' | |
| self._sResponseHeader = '' | |
| self._ssl_verify = ssl_verify | |
| self._bypass_dns = bypass_dns | |
| self.ignoreDiscard(False) | |
| self.ignoreExpired(False) | |
| self.caching = caching | |
| self.ignoreErrors = ignoreErrors | |
| self.compression = compression | |
| self.jspost = jspost | |
| self.cacheTime = int(getSetting('cacheTime', 600)) | |
| self.requestTimeout = int(getSetting('requestTimeout', 10)) | |
| self.bypassDNSlock = (getSetting('bypassDNSlock', 'false') == 'true') | |
| self.removeBreakLines(True) | |
| self.removeNewLines(True) | |
| self.__setDefaultHeader() | |
| self.__setCachePath() | |
| self.__setCookiePath() | |
| self.isMemoryCacheActive = (getSetting('volatileHtmlCache', 'false') == 'true') | |
| if self.isMemoryCacheActive: | |
| self._memCache = cCache() | |
| socket.setdefaulttimeout(self.requestTimeout) | |
| def getStatus(self): | |
| return self._Status | |
| def removeNewLines(self, bRemoveNewLines): | |
| self.__bRemoveNewLines = bRemoveNewLines | |
| def removeBreakLines(self, bRemoveBreakLines): | |
| self.__bRemoveBreakLines = bRemoveBreakLines | |
| def addHeaderEntry(self, sHeaderKey, sHeaderValue): | |
| self._headerEntries[sHeaderKey] = sHeaderValue | |
| def getHeaderEntry(self, sHeaderKey): | |
| if sHeaderKey in self._headerEntries: | |
| return self._headerEntries[sHeaderKey] | |
| def addParameters(self, key, value, Quote=False): | |
| self._aParameters[key] = value if not Quote else quote(str(value)) | |
| def getResponseHeader(self): | |
| return self._sResponseHeader | |
| def getRealUrl(self): | |
| return self._sRealUrl | |
| def getRequestUri(self): | |
| return self._sUrl + '?' + urlencode(self._aParameters) | |
| def __setDefaultHeader(self): | |
| self.addHeaderEntry('User-Agent', self._USER_AGENT) | |
| self.addHeaderEntry('Accept-Language', 'de,en-US;q=0.7,en;q=0.3') | |
| self.addHeaderEntry('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8') | |
| if self.compression: | |
| self.addHeaderEntry('Accept-Encoding', 'gzip, deflate') | |
| self.addHeaderEntry('Connection', 'keep-alive') | |
| self.addHeaderEntry('Keep-Alive', 'timeout=5') | |
| @staticmethod | |
| def __getDefaultHandler(ssl_verify, ip=None): | |
| if ip: | |
| return [CustomSecureHTTPSHandler(ip=ip)] | |
| elif ssl_verify: | |
| return [CustomSecureHTTPSHandler()] | |
| else: | |
| ssl_context = ssl.create_default_context() | |
| ssl_context.check_hostname = False | |
| ssl_context.verify_mode = ssl.CERT_NONE | |
| return [HTTPSHandler(context=ssl_context)] | |
| @staticmethod | |
| def __cleanupUrl(url): | |
| #------- kasi ---------- | |
| p = urlparse(url) | |
| if p.query: | |
| query = quote_plus(p.query).replace('%3D', '=').replace('%26', '&') | |
| p = p._replace(query=query) | |
| else: | |
| path = quote_plus(p.path).replace('%2F', '/').replace('%26', '&').replace('%3D', '=') | |
| p = p._replace(path=path) | |
| return p.geturl() | |
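| # __cleanupUrl example (illustrative input): 'https://host/mein pfad' -> 'https://host/mein+pfad' | |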
| def request(self): | |
| if self.caching and self.cacheTime > 0: | |
| if self.isMemoryCacheActive: | |
| sContent = self.__readVolatileCache(self.getRequestUri(), self.cacheTime) | |
| else: | |
| sContent = self.__readPersistentCache(self.getRequestUri()) | |
| if sContent: | |
| self._Status = '200' | |
| return sContent | |
| #-------- kasi ------------ | |
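| # after a Kodi restart the RAM cache is empty: fall back to the file cache and, on a hit, promote it into RAM | |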
| else: | |
| if self.isMemoryCacheActive: | |
| sContent = self.__readPersistentCache(self.getRequestUri()) | |
| if sContent: | |
| self._Status = '200' | |
| self.__writeVolatileCache(self.getRequestUri(), sContent) | |
| return sContent | |
| # only execute when both the passed-in parameter and the configuration allow it | |
| if self._bypass_dns and self.bypassDNSlock: | |
| ### DNS lock bypass | |
| ip_override = self.__doh_request(self._sUrl) | |
| ### DNS lock bypass | |
| else: | |
| ip_override = None | |
| cookieJar = LWPCookieJar(filename=self._cookiePath) | |
| try: | |
| cookieJar.load(ignore_discard=self.__bIgnoreDiscard, ignore_expires=self.__bIgnoreExpired) | |
| except Exception as e: | |
| logger.debug(e) | |
| domain = urlparse(self._sUrl).netloc | |
| if domain in cRequestHandler.persistent_openers: | |
| opener = cRequestHandler.persistent_openers[domain] | |
| else: | |
| handlers = self.__getDefaultHandler(self._ssl_verify, ip_override) | |
| handlers += [HTTPHandler(), HTTPCookieProcessor(cookiejar=cookieJar), RedirectFilter()] | |
| opener = build_opener(*handlers) | |
| cRequestHandler.persistent_openers[domain] = opener | |
| sParameters = json.dumps(self._aParameters).encode() if self.jspost else urlencode(self._aParameters, True).encode() | |
| oRequest = Request(self._sUrl, sParameters if len(sParameters) > 0 else None) | |
| for key, value in self._headerEntries.items(): | |
| oRequest.add_header(key, value) | |
| if self.jspost: | |
| oRequest.add_header('Content-Type', 'application/json') | |
| cookieJar.add_cookie_header(oRequest) | |
| try: | |
| oResponse = opener.open(oRequest) | |
| except HTTPError as e: | |
| if e.code >= 400: | |
| self._Status = str(e.code) | |
| data = e.fp.read() | |
| if 'DDOS-GUARD' in str(data): | |
| opener = build_opener(HTTPCookieProcessor(cookieJar)) | |
| opener.addheaders = [('User-agent', self._USER_AGENT), ('Referer', self._sUrl)] | |
| response = opener.open('https://check.ddos-guard.net/check.js') | |
| content = response.read().decode('utf-8', 'replace') | |
| url2 = re.findall("Image.*?'([^']+)'; new", content) | |
| url3 = urlparse(self._sUrl) | |
| url3 = '%s://%s/%s' % (url3.scheme, url3.netloc, url2[0]) | |
| opener = build_opener(HTTPCookieProcessor(cookieJar)) | |
| opener.addheaders = [('User-agent', self._USER_AGENT), ('Referer', self._sUrl)] | |
| opener.open(url3).read() | |
| opener = build_opener(HTTPCookieProcessor(cookieJar)) | |
| opener.addheaders = [('User-agent', self._USER_AGENT), ('Referer', self._sUrl)] | |
| oResponse = opener.open(self._sUrl, sParameters if len(sParameters) > 0 else None) | |
| if not oResponse: | |
| logger.error(' -> [requestHandler]: Failed DDOS-GUARD active: ' + self._sUrl) | |
| return 'DDOS GUARD SCHUTZ' | |
| elif 'cloudflare' in str(e.headers): | |
| if not self.ignoreErrors: | |
| value = ('!!! CLOUDFLARE-SCHUTZ AKTIV !!! Weitere Informationen: ' + str(e.__class__.__name__) + ' : ' + str(e), str(traceback.format_exc().splitlines()[-3].split('addons')[-1])) | |
| xbmcgui.Dialog().ok('xShip', str(value)) # Error | |
| logger.error(' -> [requestHandler]: Failed Cloudflare active: ' + self._sUrl) | |
| return 'CLOUDFLARE-SCHUTZ AKTIV' # the message is passed as "e.doc" into the exception handled in default.py | |
| else: | |
| if not self.ignoreErrors: | |
| xbmcgui.Dialog().ok('xShip', "Fehler beim Abrufen der Url:" + ' {0} {1}'.format(self._sUrl, str(e))) | |
| logger.error(' -> [requestHandler]: HTTPError ' + str(e) + ' Url: ' + self._sUrl) | |
| return 'SEITE NICHT ERREICHBAR' | |
| else: | |
| if not self.ignoreErrors: | |
| xbmcgui.Dialog().ok('xShip', "Fehler beim Abrufen der Url:" + ' {0} {1}'.format(self._sUrl, str(e))) | |
| logger.error(' -> [requestHandler]: HTTPError ' + str(e) + ' Url: ' + self._sUrl) | |
| return 'SEITE NICHT ERREICHBAR' | |
| except URLError as e: | |
| if not self.ignoreErrors: | |
| xbmcgui.Dialog().ok('xShip', str(e.reason)) | |
| logger.error(' -> [requestHandler]: URLError ' + str(e.reason) + ' Url: ' + self._sUrl) | |
| return 'URL FEHLER' | |
| except HTTPException as e: | |
| if not self.ignoreErrors: | |
| xbmcgui.Dialog().ok('xShip', str(e)) | |
| logger.error(' -> [requestHandler]: HTTPException ' + str(e) + ' Url: ' + self._sUrl) | |
| return 'TIMEOUT' | |
| self._sResponseHeader = oResponse.info() | |
| content_encoding = self._sResponseHeader.get('Content-Encoding', '').lower() | |
| if content_encoding: | |
| raw_content = oResponse.read() | |
| if content_encoding == 'gzip': | |
| decompressed = zlib.decompress(raw_content, wbits=zlib.MAX_WBITS | 16) | |
| elif content_encoding == 'deflate': | |
| decompressed = zlib.decompress(raw_content, wbits=-zlib.MAX_WBITS) | |
| else: | |
| decompressed = raw_content | |
| sContent = decompressed.decode('utf-8', 'replace') | |
| else: | |
| sContent = oResponse.read().decode('utf-8', 'replace') | |
| if 'lazingfast' in sContent: | |
| bf = cBF().resolve(self._sUrl, sContent, cookieJar, self._USER_AGENT, sParameters) | |
| if bf: | |
| sContent = bf | |
| else: | |
| logger.error(' -> [requestHandler]: Failed Blazingfast active: ' + self._sUrl) | |
| try: | |
| cookieJar.save(ignore_discard=self.__bIgnoreDiscard, ignore_expires=self.__bIgnoreExpired) | |
| except Exception as e: | |
| logger.error(' -> [requestHandler]: Failed save cookie: %s' % e) | |
| self._sRealUrl = oResponse.geturl() | |
| self._Status = oResponse.getcode() if self._sUrl == self._sRealUrl else '301' | |
| if self.__bRemoveNewLines: | |
| sContent = sContent.replace('\n', '').replace('\r\t', '') | |
| if self.__bRemoveBreakLines: | |
| sContent = sContent.replace(' ', '') | |
| if self.caching and self.cacheTime > 0: | |
| if self.isMemoryCacheActive: | |
| self.__writeVolatileCache(self.getRequestUri(), sContent) | |
| self.__writePersistentCache(self.getRequestUri(), sContent) # kasi | |
| else: | |
| self.__writePersistentCache(self.getRequestUri(), sContent) | |
| return sContent | |
| def __setCookiePath(self): | |
| cookieFile = os.path.join(self._profilePath, 'cookies') | |
| if not os.path.exists(cookieFile): | |
| os.makedirs(cookieFile) | |
| if 'dummy' not in self._sUrl: | |
| cookieFile = os.path.join(cookieFile, urlparse(self._sUrl).netloc.replace('.', '_') + '.txt') | |
| if not os.path.exists(cookieFile): | |
| open(cookieFile, 'w').close() | |
| self._cookiePath = cookieFile | |
| def getCookie(self, sCookieName, sDomain=''): | |
| cookieJar = LWPCookieJar() | |
| try: | |
| cookieJar.load(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired) | |
| except Exception as e: | |
| logger.error(e) | |
| for entry in cookieJar: | |
| if entry.name == sCookieName: | |
| if sDomain == '': | |
| return entry | |
| elif entry.domain == sDomain: | |
| return entry | |
| return False | |
| def setCookie(self, oCookie): | |
| cookieJar = LWPCookieJar() | |
| try: | |
| cookieJar.load(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired) | |
| cookieJar.set_cookie(oCookie) | |
| cookieJar.save(self._cookiePath, self.__bIgnoreDiscard, self.__bIgnoreExpired) | |
| except Exception as e: | |
| logger.error(e) | |
| def ignoreDiscard(self, bIgnoreDiscard): | |
| self.__bIgnoreDiscard = bIgnoreDiscard | |
| def ignoreExpired(self, bIgnoreExpired): | |
| self.__bIgnoreExpired = bIgnoreExpired | |
| def __doh_request(self, url, doh_server="https://cloudflare-dns.com/dns-query"): | |
| # Parse the URL | |
| parsed_url = urlparse(url) | |
| hostname = parsed_url.hostname | |
| key = 'doh_request' + hostname | |
| if self.isMemoryCacheActive and self.cacheTime > 0: | |
| ip_address = self.__readVolatileCache(key, self.cacheTime) | |
| if ip_address: | |
| return ip_address | |
| params = urlencode({"name": hostname, "type": "A"}) | |
| doh_url = f"{doh_server}?{params}" | |
| req = Request(doh_url) | |
| req.add_header("Accept", "application/dns-json") | |
| try: | |
| response = urlopen(req, timeout=5) | |
| response_text = response.read().decode("utf-8", "replace") | |
| dns_response = json.loads(response_text) | |
| if "Answer" not in dns_response: | |
| raise Exception("Invalid DNS response") | |
| ip_address = dns_response["Answer"][0]["data"] | |
| if self.isMemoryCacheActive and self.cacheTime > 0: | |
| self.__writeVolatileCache(key, ip_address) | |
| return ip_address | |
| except Exception as e: | |
| logger.error(' -> [requestHandler]: DNS query failed: %s' % e) | |
| return None | |
| def __setCachePath(self): | |
| cache = os.path.join(self._profilePath, 'htmlcache') | |
| if not os.path.exists(cache): | |
| os.makedirs(cache) | |
| self._cachePath = cache | |
| def __readPersistentCache(self, url): | |
| h = hashlib.md5(url.encode('utf8')).hexdigest() | |
| cacheFile = os.path.join(self._cachePath, h) | |
| fileAge = self.getFileAge(cacheFile) | |
| if 0 < fileAge < self.cacheTime: | |
| content = None  # keep 'content' bound even if reading the cache file fails below | |
| try: | |
| with open(cacheFile, 'rb') as f: | |
| content = f.read().decode('utf8') | |
| except Exception: | |
| logger.error(' -> [requestHandler]: Could not read Cache') | |
| if content: | |
| logger.info(' -> [requestHandler]: read html for %s from cache' % url) | |
| return content | |
| return None | |
| def __writePersistentCache(self, url, content): | |
| try: | |
| h = hashlib.md5(url.encode('utf8')).hexdigest() | |
| with open(os.path.join(self._cachePath, h), 'wb') as f: | |
| f.write(content.encode('utf8')) | |
| except Exception: | |
| logger.error(' -> [requestHandler]: Could not write Cache') | |
| def __writeVolatileCache(self, url, content): | |
| self._memCache.set(hashlib.md5(url.encode('utf8')).hexdigest(), content) | |
| def __readVolatileCache(self, url, cache_time): | |
| entry = self._memCache.get(hashlib.md5(url.encode('utf8')).hexdigest(), cache_time) | |
| if entry: | |
| logger.info(' -> [requestHandler]: read html for %s from cache' % url) | |
| return entry | |
| @staticmethod | |
| def getFileAge(cacheFile): | |
| try: | |
| return time.time() - os.stat(cacheFile).st_mtime | |
| except Exception: | |
| return 0 | |
| def clearCache(self): | |
| # clear volatile cache | |
| if self.isMemoryCacheActive: | |
| self._memCache.clear() | |
| cRequestHandler.persistent_openers.clear() | |
| # clear persistent cache | |
| files = os.listdir(self._cachePath) | |
| for file in files: | |
| os.remove(os.path.join(self._cachePath, file)) | |
| xbmcgui.Dialog().notification('xShip', "HTML Cache Ordner wurde bereinigt", xbmcgui.NOTIFICATION_INFO, 100, False) | |
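| # --- illustrative usage sketch (assumed URL; the methods are the ones defined above) --- | |
| #   oRequest = cRequestHandler('https://example.org/api', caching=False) | |
| #   oRequest.addHeaderEntry('Referer', 'https://example.org/') | |
| #   oRequest.addParameters('page', 1)   # any parameter makes request() send a POST | |
| #   sHtml = oRequest.request() | |
| #   status = oRequest.getStatus() | |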
| class cBF: | |
| def resolve(self, url, html, cookie_jar, user_agent, sParameters): | |
| page = urlparse(url).scheme + '://' + urlparse(url).netloc | |
| j = re.compile(r'<script[^>]src="([^"]+)').findall(html) | |
| if j: | |
| opener = build_opener(HTTPCookieProcessor(cookie_jar)) | |
| opener.addheaders = [('User-agent', user_agent), ('Referer', url)] | |
| opener.open(page + j[0]) | |
| a = re.compile(r'xhr\.open\("GET","([^,]+)",').findall(html) | |
| if a: | |
| import random | |
| aespage = page + a[0].replace('" + ww +"', str(random.randint(700, 1500))) | |
| opener = build_opener(HTTPCookieProcessor(cookie_jar)) | |
| opener.addheaders = [('User-agent', user_agent), ('Referer', url)] | |
| html = opener.open(aespage).read().decode('utf-8', 'replace') | |
| cval = self.aes_decode(html) | |
| cdata = re.compile(r'cookie="([^="]+).*?domain[^>]=([^;]+)').findall(html) | |
| if cval and cdata: | |
| c = Cookie(version=0, name=cdata[0][0], value=cval, port=None, port_specified=False, domain=cdata[0][1], domain_specified=True, domain_initial_dot=False, path="/", path_specified=True, secure=False, expires=time.time() + 21600, discard=False, comment=None, comment_url=None, rest={}) | |
| cookie_jar.set_cookie(c) | |
| opener = build_opener(HTTPCookieProcessor(cookie_jar)) | |
| opener.addheaders = [('User-agent', user_agent), ('Referer', url)] | |
| return opener.open(url, sParameters if len(sParameters) > 0 else None).read().decode('utf-8', 'replace') | |
| @staticmethod | |
| def aes_decode(html): | |
| try: | |
| import pyaes | |
| keys = re.compile(r'toNumbers\("([^"]+)"').findall(html) | |
| if keys: | |
| from binascii import hexlify, unhexlify | |
| msg = unhexlify(keys[2]) | |
| key = unhexlify(keys[0]) | |
| iv = unhexlify(keys[1]) | |
| decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv)) | |
| plain_text = decrypter.feed(msg) | |
| plain_text += decrypter.feed() | |
| return hexlify(plain_text).decode() | |
| except Exception as e: | |
| logger.error(e) | |
| class cCache(object): | |
| _win = None | |
| def __init__(self): | |
| # see https://kodi.wiki/view/Window_IDs | |
| # use WINDOW_SCREEN_CALIBRATION to store all data | |
| self._win = xbmcgui.Window(10011) | |
| def __del__(self): | |
| del self._win | |
| def get(self, key, cache_time): | |
| cachedata = self._win.getProperty(key) | |
| if cachedata: | |
| cachedata = eval(cachedata) | |
| if time.time() - cachedata[0] < cache_time: | |
| return cachedata[1] | |
| else: | |
| self._win.clearProperty(key) | |
| return None | |
| def set(self, key, data): | |
| self._win.setProperty(key, repr((time.time(), data))) | |
| def clear(self): | |
| self._win.clearProperties() |
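| # cCache keeps (timestamp, data) tuples as properties of window 10011, so entries survive | |
| # across plugin invocations but not across Kodi restarts. Illustrative use: | |
| #   cache = cCache() | |
| #   cache.set('key', 'value') | |
| #   cache.get('key', 600)   # returns 'value' while younger than 600 seconds, else None | |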
| # kasi - SourceCode class _Storage() von https://github.com/vlmaksime/script.module.simpleplugin | |
| # 2022-10-05 | |
| # edit 2023-03-11 | |
| """ | |
| SimplePlugin micro-framework for Kodi content plugins | |
| **Author**: Roman Miroshnychenko aka Roman V.M. | |
| **License**: `GPL v.3 <https://www.gnu.org/copyleft/gpl.html>`_ | |
| """ | |
| import os,sys | |
| import time | |
| import hashlib | |
| import pickle | |
| from copy import deepcopy | |
| from shutil import copyfile | |
| import xbmc, xbmcaddon, xbmcvfs | |
| if sys.version_info.major == 3: | |
| from urllib.parse import quote_plus | |
| from collections.abc import MutableMapping | |
| translatePath = xbmcvfs.translatePath | |
| else: | |
| from urllib import quote_plus | |
| from collections import MutableMapping | |
| translatePath = xbmc.translatePath | |
| class _Storage(MutableMapping): | |
| """ | |
| Storage(storage_dir, filename='storage.pcl') | |
| Persistent storage for arbitrary data with a dictionary-like interface | |
| It is designed as a context manager and better be used | |
| with 'with' statement. | |
| :param storage_dir: directory for storage | |
| :type storage_dir: str | |
| :param filename: the name of a storage file (optional) | |
| :type filename: str | |
| Usage:: | |
| with Storage('/foo/bar/storage/') as storage: | |
| storage['key1'] = value1 | |
| value2 = storage['key2'] | |
| .. note:: After exiting :keyword:`with` block a :class:`Storage` instance | |
| is invalidated. Storage contents are saved to disk only for | |
| a new storage or if the contents have been changed. | |
| """ | |
| def __init__(self, storage_dir, filename='storage.pcl'): | |
| """ | |
| Class constructor | |
| :type storage_dir: str | |
| :type filename: str | |
| """ | |
| # insert by kasi | |
| name, ext = os.path.splitext(filename) | |
| if not ext: | |
| ext = '.pcl' | |
| filename = name + ext | |
| self._storage = {} | |
| self._hash = None | |
| self._filename = os.path.join(storage_dir, filename) | |
| try: | |
| with open(self._filename, 'rb') as fo: | |
| contents = fo.read() | |
| self._storage = pickle.loads(contents) | |
| self._hash = hashlib.md5(contents).hexdigest() | |
| except (IOError, pickle.PickleError, EOFError, AttributeError): | |
| pass | |
| def __enter__(self): | |
| return self | |
| def __exit__(self, t, v, tb): | |
| self.flush() | |
| def __getitem__(self, key): | |
| return self._storage[key] | |
| def __setitem__(self, key, value): | |
| self._storage[key] = value | |
| def __delitem__(self, key): | |
| del self._storage[key] | |
| def __iter__(self): | |
| return iter(self._storage) | |
| def __len__(self): | |
| return len(self._storage) | |
| def __str__(self): | |
| return '<Storage {0}>'.format(self._storage) | |
| def flush(self): | |
| """ | |
| Save storage contents to disk | |
| This method saves new and changed :class:`Storage` contents to disk | |
| and invalidates the Storage instance. Unchanged Storage is not saved | |
| but simply invalidated. | |
| """ | |
| contents = pickle.dumps(self._storage, protocol=2) | |
| if self._hash is None or hashlib.md5(contents).hexdigest() != self._hash: | |
| tmp = self._filename + '.tmp' | |
| start = time.time() | |
| while os.path.exists(tmp): | |
| if time.time() - start > 2.0: | |
| raise TimeoutError( | |
| 'Exceeded timeout for saving {0} contents!'.format(self) | |
| ) | |
| xbmc.sleep(100) | |
| try: | |
| with open(tmp, 'wb') as fo: | |
| fo.write(contents) | |
| copyfile(tmp, self._filename) | |
| finally: | |
| os.remove(tmp) | |
| del self._storage | |
| def copy(self): | |
| """ | |
| Make a copy of storage contents | |
| .. note:: this method performs a *deep* copy operation. | |
| :return: a copy of storage contents | |
| :rtype: dict | |
| """ | |
| return deepcopy(self._storage) | |
| def _py2_decode(s, encoding='utf-8'): | |
| """ | |
| Decode Python 2 ``str`` to ``unicode`` | |
| In Python 3 the string is not changed. | |
| """ | |
| if sys.version_info.major == 2 and isinstance(s, bytes): | |
| s = s.decode(encoding) | |
| return s | |
| def _get_storage(filename='storage.pcl'): | |
| """ | |
| Get a persistent :class:`Storage` instance for storing arbitrary values | |
| between addon calls. | |
| A :class:`Storage` instance can be used as a context manager. | |
| Example:: | |
| with plugin.get_storage() as storage: | |
| storage['param1'] = value1 | |
| value2 = storage['param2'] | |
| .. note:: After exiting :keyword:`with` block a :class:`Storage` | |
| instance is invalidated. | |
| :param filename: the name of a storage file (optional) | |
| :type filename: str | |
| :return: Storage object | |
| :rtype: Storage | |
| """ | |
| if not filename: filename = 'storage.pcl' | |
| _profile_dir = _py2_decode(translatePath(xbmcaddon.Addon().getAddonInfo('profile'))) | |
| return _Storage(_profile_dir, filename) | |
| # 'movies','tvshows', 'person' | |
| def save_query(query, filename=None): | |
| with _get_storage(filename) as storage: | |
| if 'queries' not in storage: | |
| storage['queries'] = [] | |
| entry = { | |
| 'query': query | |
| } | |
| if entry in storage['queries']: | |
| storage['queries'].remove(entry) | |
| storage['queries'].insert(0, entry) | |
| def remove_query(index, filename=None): | |
| with _get_storage(filename) as storage: | |
| storage['queries'].pop(int(index)) | |
| xbmc.executebuiltin('Container.Refresh') | |
| def remove_all_query(filename=None): | |
| with _get_storage(filename) as storage: | |
| storage['queries'] = []  # clears the whole history in one step | |
| xbmc.executebuiltin('Container.Refresh') | |
| def getSearchTerms(filename=None): # 'movies','tvshows', 'person' | |
| with _get_storage(filename) as storage: | |
| if 'queries' not in storage: | |
| storage['queries'] = [] | |
| return storage['queries'] | |
| def search_new(filename=None): # 'movies','tvshows', 'person' | |
| k = xbmc.Keyboard('', "Suche") | |
| k.doModal() | |
| term = k.getText() if k.isConfirmed() else None | |
| if term is None or term == '': return | |
| term = term.strip() | |
| save_query(term, filename) | |
| table = (filename or 'storage').split('.')[0] # test only; 'storage' is an assumed fallback when no filename is given | |
| url = '%s?action=%s&page=1&query=%s' % (sys.argv[0], table, quote_plus(term)) | |
| xbmc.executebuiltin('Container.Update(%s)' % url) |
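| # --- illustrative search-history flow (functions from this module; 'movies.pcl' is an assumed filename) --- | |
| #   save_query('Matrix', 'movies.pcl')        # inserts/moves the term to the top of the history | |
| #   for entry in getSearchTerms('movies.pcl'): | |
| #       print(entry['query']) | |
| #   remove_query(0, 'movies.pcl')             # drops the newest entry and refreshes the container | |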
| #2021-07-21 | |
| # edit 2025-08-02 switch from treads to concurrent.futures | |
| import sys, re | |
| import datetime, time, json | |
| from resources.lib.tmdb import cTMDB | |
| from concurrent.futures import ThreadPoolExecutor | |
| from resources.lib import control, playcountDB, log_utils | |
| from resources.lib.control import getKodiVersion | |
| if int(getKodiVersion()) >= 20: from infotagger.listitem import ListItemInfoTag | |
| _params = dict(control.parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict() | |
| class seasons: | |
| def __init__(self): | |
| self.list = [] | |
| self.lang = "de" | |
| self.sysmeta = _params['sysmeta'] | |
| #self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours=5)) | |
| #self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f') | |
| def get(self, params): | |
| try: | |
| data = json.loads(params['sysmeta']) | |
| self.title = data['title'] | |
| if 'number_of_seasons' not in data or not data['number_of_seasons']: return | |
| number_of_seasons = data['number_of_seasons'] | |
| tmdb_id = data['tmdb_id'] | |
| tvdb_id = data['tvdb_id'] if 'tvdb_id' in data else None | |
| imdb_id = data['imdb_id'] if 'imdb_id' in data else None | |
| title = data['title'] | |
| playcount = playcountDB.getPlaycount('tvshow', 'title', title, None, None) | |
| if playcount is None: | |
| #playcountDB.createEntry('tvshow', title, title, imdb_id, number_of_seasons, None, None, None) | |
| playcount = 0 | |
| self.sysmeta = re.sub(r'"playcount": \d', '"playcount": %s' % playcount, self.sysmeta) | |
| for i in range(1, number_of_seasons+1): | |
| self.list.append({'tmdb_id': tmdb_id, 'tvdb_id': tvdb_id, 'season': i}) | |
| self.worker() | |
| if not self.list: # nothing found | |
| control.infoDialog("Nichts gefunden", time=8000) | |
| else: | |
| self.Directory(self.list) | |
| return self.list | |
| return | |
| except: | |
| pass # return ??? | |
| def worker(self): | |
| self.meta = [] | |
| with ThreadPoolExecutor() as executor: | |
| executor.map(self.super_meta, self.list) | |
| self.meta = sorted(self.meta, key=lambda k: k['season']) | |
| self.list = list(self.meta) # copy; in case a filter function is added later | |
| def super_meta(self, i): | |
| try: | |
| meta = cTMDB().get_meta_seasons(i['tmdb_id'] , i['season'], advanced='true') | |
| try: | |
| playcount = playcountDB.getPlaycount('season', 'title', self.title, meta['season'], None) | |
| playcount = playcount if playcount else 0 | |
| overlay = 7 if playcount > 0 else 6 | |
| meta.update({'playcount': playcount, 'overlay': overlay}) | |
| except: | |
| pass | |
| self.meta.append(meta) | |
| except: | |
| pass | |
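| # note: super_meta runs concurrently via ThreadPoolExecutor; list.append is atomic under CPython's GIL, so sharing self.meta here is safe | |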
| def Directory(self, items): | |
| if not items: | |
| control.idle() | |
| sys.exit() | |
| sysaddon = sys.argv[0] | |
| syshandle = int(sys.argv[1]) | |
| addonPoster, addonBanner = control.addonPoster(), control.addonBanner() | |
| addonFanart, settingFanart = control.addonFanart(), control.getSetting('fanart') | |
| watchedMenu = "In %s [I]Gesehen[/I]" % control.addonName | |
| unwatchedMenu = "In %s [I]Ungesehen[/I]" % control.addonName | |
| pos = 0 | |
| for i in items: | |
| try: | |
| meta = json.loads(self.sysmeta) | |
| sysmeta = json.loads(self.sysmeta) | |
| season = i['season'] | |
| systitle = sysmeta['systitle'] | |
| sysname = systitle + ' S%02d' % season | |
| sysmeta.update({'sysname': sysname}) | |
| sysmeta.update({'season': season}) | |
| sysmeta.update({'number_of_episodes': i['number_of_episodes']}) | |
| sysmeta.update({'episodes': i['episodes']}) | |
| _sysmeta = {k: v for k, v in sysmeta.items()} | |
| _sysmeta.pop('cast', None) | |
| _sysmeta.pop('episodes', None) | |
| _sysmeta = control.quote_plus(json.dumps(_sysmeta)) | |
| label = 'Staffel %s - %s' % (season, sysmeta['title']) | |
| if datetime.datetime(*(time.strptime(i['premiered'], "%Y-%m-%d")[0:6])) > datetime.datetime.now(): | |
| label = '[COLOR=red][I]{}[/I][/COLOR]'.format(label) # ffcc0000 | |
| poster = i['poster'] if 'poster' in i and 'http' in i['poster'] else sysmeta['poster'] | |
| fanart = sysmeta['fanart'] if 'fanart' in sysmeta else addonFanart | |
| plot = i['plot'] if 'plot' in i and len(i['plot']) > 50 else sysmeta['plot'] | |
| meta.update({'poster': poster}) | |
| meta.update({'fanart': fanart}) | |
| meta.update({'plot': plot}) | |
| #if 'air_date' in i and i['air_date']: meta.update({'air_date': i['air_date']}) | |
| if 'premiered' in i and i['premiered']: meta.update({'premiered': i['premiered']}) | |
| item = control.item(label=label, offscreen=True) | |
| item.setArt({'poster': poster, 'banner': addonBanner}) | |
| if settingFanart == 'true': item.setProperty('Fanart_Image', fanart) | |
| if sysmeta['playcount'] == 0: playcount = i['playcount'] | |
| else: playcount = 1 | |
| cm = [] | |
| try: | |
| if playcount == 1: | |
| cm.append((unwatchedMenu, 'RunPlugin(%s?action=UpdatePlayCount&meta=%s&playCount=0)' % (sysaddon, _sysmeta))) | |
| meta.update({'playcount': 1, 'overlay': 7}) | |
| sysmeta.update({'playcount': 1, 'overlay': 7}) | |
| pos = season + 1 | |
| if len(items) == season: pos = season | |
| else: | |
| cm.append((watchedMenu, 'RunPlugin(%s?action=UpdatePlayCount&meta=%s&playCount=1)' % (sysaddon, _sysmeta))) | |
| meta.update({'playcount': 0, 'overlay': 6}) | |
| sysmeta.update({'playcount': 0, 'overlay': 6}) | |
| except: | |
| pass | |
| item.addContextMenuItems(cm) | |
| sysmeta = control.quote_plus(json.dumps(sysmeta)) | |
| url = '%s?action=episodes&sysmeta=%s' % (sysaddon, sysmeta) | |
| aActors = [] | |
| if 'cast' in meta and meta['cast']: aActors = meta['cast'] | |
| ## supported infolabels: https://codedocs.xyz/AlwinEsch/kodi/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 | |
| # # # remove unsupported InfoLabels | |
| meta.pop('cast', None) # replaced by item.setCast(i['cast']) | |
| meta.pop('fanart', None) | |
| meta.pop('poster', None) | |
| meta.pop('imdb_id', None) | |
| meta.pop('tvdb_id', None) | |
| meta.pop('tmdb_id', None) | |
| meta.pop('number_of_seasons', None) | |
| meta.pop('number_of_episodes', None) | |
| meta.pop('originallanguage', None) | |
| meta.pop('sysname', None) | |
| meta.pop('systitle', None) | |
| meta.pop('year', None) | |
| meta.pop('aliases', None) | |
| meta.pop('backdrop_url', None) | |
| meta.pop('cover_url', None) | |
| # faked video/audio info | |
| # video_streaminfo = {'codec': 'h264', "width": 1920, "height": 1080} | |
| # audio_streaminfo = {'codec': 'dts', 'channels': 6, 'language': 'de'} | |
| video_streaminfo = {} | |
| audio_streaminfo = {} | |
| if int(getKodiVersion()) <= 19: | |
| if aActors: item.setCast(aActors) | |
| item.setInfo(type='Video', infoLabels=meta) | |
| item.addStreamInfo('video', video_streaminfo) | |
| item.addStreamInfo('audio', audio_streaminfo) | |
| else: | |
| info_tag = ListItemInfoTag(item, 'video') | |
| info_tag.set_info(meta) | |
| stream_details = { | |
| 'video': [video_streaminfo], | |
| 'audio': [audio_streaminfo]} | |
| info_tag.set_stream_details(stream_details) | |
| info_tag.set_cast(aActors) | |
| control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True) | |
| except Exception as e: | |
| #print(e) #TODO LOG | |
| pass | |
| control.content(syshandle, 'tvshows') | |
| control.plugincategory(syshandle, control.addonVersion) | |
| control.endofdirectory(syshandle, cacheToDisc=True) | |
| # moves the selection to the last season marked as watched -> content: 'movies' | |
| if control.getSetting('status.position') == 'true': | |
| from resources.lib.utils import setPosition | |
| setPosition(pos, __name__, 'movies') |
| # -*- coding: utf-8 -*- | |
| # Python 3 | |
| # Always pay attention to the translations in the menu! | |
| # Language selection for hosters included. | |
| # Ajax search function included. | |
| # Long-term HTML cache added: | |
| # showValue: 24 hours | |
| # showAllSeries: 24 hours | |
| # showEpisodes: 4 hours | |
| # SSsearch: 24 hours | |
| # 2022-12-06 Heptamer - search function reworked | |
| import xbmcgui | |
| from resources.lib.handler.ParameterHandler import ParameterHandler | |
| from resources.lib.handler.requestHandler import cRequestHandler | |
| from resources.lib.tools import logger, cParser, cUtil | |
| from resources.lib.gui.guiElement import cGuiElement | |
| from resources.lib.config import cConfig | |
| from resources.lib.gui.gui import cGui | |
| SITE_IDENTIFIER = 'serienstream' | |
| SITE_NAME = 'SerienStream' | |
| SITE_ICON = 'serienstream.png' | |
| # Global search function is thus deactivated! | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'false': | |
| SITE_GLOBAL_SEARCH = False | |
| logger.info('-> [SitePlugin]: globalSearch for %s is deactivated.' % SITE_NAME) | |
| # domain lookup | |
| DOMAIN = cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '.domain') # domain selectable via the xStream settings | |
| STATUS = cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '_status') # status code check for the domain | |
| ACTIVE = cConfig().getSetting('plugin_' + SITE_IDENTIFIER) # whether the plugin is enabled or not | |
| # URL_MAIN = 'https://s.to/' | |
| if DOMAIN == '186.2.175.5': # if the proxy changes, swap the IP only here, in the settings, and in line 53 | |
| URL_MAIN = 'http://' + DOMAIN | |
| REFERER = 'http://' + DOMAIN | |
| proxy = 'true' | |
| else: | |
| URL_MAIN = 'https://' + DOMAIN | |
| REFERER = 'https://' + DOMAIN | |
| proxy = 'false' | |
| URL_SERIES = URL_MAIN + '/serien' | |
| URL_NEW_SERIES = URL_MAIN + '/neu' | |
| URL_NEW_EPISODES = URL_MAIN + '/neue-episoden' | |
| URL_POPULAR = URL_MAIN + '/beliebte-serien' | |
| URL_LOGIN = URL_MAIN + '/login' | |
| # if the DNS bypass is active, use the proxy server | |
| if cConfig().getSetting('bypassDNSlock') == 'true': | |
| cConfig().setSetting('plugin_' + SITE_IDENTIFIER + '.domain', '186.2.175.5') | |
| # | |
| def load(): # Menu structure of the site plugin | |
| logger.info('Load %s' % SITE_NAME) | |
| params = ParameterHandler() | |
| username = cConfig().getSetting('serienstream.user')# Username | |
| password = cConfig().getSetting('serienstream.pass')# Password | |
| if username == '' or password == '': # If no username and password were set, close the plugin! | |
| xbmcgui.Dialog().ok(cConfig().getLocalizedString(30241), cConfig().getLocalizedString(30264)) # Info Dialog! | |
| else: | |
| params.setParam('sUrl', URL_SERIES) | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30518), SITE_IDENTIFIER, 'showAllSeries'), params)# All Series | |
| params.setParam('sUrl', URL_NEW_SERIES) | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30514), SITE_IDENTIFIER, 'showEntries'), params) # New Series | |
| params.setParam('sUrl', URL_NEW_EPISODES) | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30516), SITE_IDENTIFIER, 'showNewEpisodes'), params) # New Episodes | |
| params.setParam('sUrl', URL_POPULAR) | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30519), SITE_IDENTIFIER, 'showEntries'), params) # Popular Series | |
| params.setParam('sUrl', URL_MAIN) | |
| params.setParam('sCont', 'catalogNav') | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30517), SITE_IDENTIFIER, 'showValue'), params) # From A-Z | |
| params.setParam('sCont', 'homeContentGenresList') | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30506), SITE_IDENTIFIER, 'showValue'), params) # Genre | |
| cGui().addFolder(cGuiElement(cConfig().getLocalizedString(30520), SITE_IDENTIFIER, 'showSearch'), params) # Search | |
| cGui().setEndOfDirectory() | |
| def showValue(): | |
| params = ParameterHandler() | |
| sUrl = params.getValue('sUrl') | |
| oRequest = cRequestHandler(sUrl) | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 24 # HTML cache time: 1 day | |
| sHtmlContent = oRequest.request() | |
| isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, r'<ul[^>]*class="%s"[^>]*>(.*?)<\/ul>' % params.getValue('sCont')) | |
| if isMatch: | |
| isMatch, aResult = cParser.parse(sContainer, r'<li>\s*<a[^>]*href="([^"]*)"[^>]*>(.*?)<\/a>\s*<\/li>') | |
| aResult = sorted(aResult, key=lambda x: x[1].lower()) # Sort alphabetically by name (case-insensitive) | |
| if not isMatch: | |
| cGui().showInfo() | |
| return | |
| for sUrl, sName in aResult: | |
| sUrl = sUrl if sUrl.startswith('http') else URL_MAIN + sUrl | |
| params.setParam('sUrl', sUrl) | |
| cGui().addFolder(cGuiElement(sName, SITE_IDENTIFIER, 'showEntries'), params) | |
| cGui().setEndOfDirectory() | |
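| # Illustrative sketch (assumption: cParser.parse behaves like re.findall with re.S): | |
| # cParser.parse('<li><a href="/genres/action">Action</a></li>', r'<li>\s*<a[^>]*href="([^"]*)"[^>]*>(.*?)<\/a>\s*<\/li>') | |
| # -> (True, [('/genres/action', 'Action')]) | |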
| def showAllSeries(entryUrl=False, sGui=False, sSearchText=False): | |
| oGui = sGui if sGui else cGui() | |
| params = ParameterHandler() | |
| if not entryUrl: entryUrl = params.getValue('sUrl') | |
| oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False)) | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 24 # HTML cache time: 1 day | |
| sHtmlContent = oRequest.request() | |
| pattern = '<a[^>]*href="(\\/serie\\/[^"]*)"[^>]*>(.*?)</a>' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: | |
| if not sGui: oGui.showInfo() | |
| return | |
| aResult = sorted(aResult, key=lambda x: x[1].lower()) # Sort alphabetically by name (case-insensitive) | |
| total = len(aResult) | |
| for sUrl, sName in aResult: | |
| if sSearchText and not cParser.search(sSearchText, sName): | |
| continue | |
| oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons') | |
| oGuiElement.setMediaType('tvshow') | |
| params.setParam('sUrl', URL_MAIN + sUrl) | |
| params.setParam('TVShowTitle', sName) | |
| oGui.addFolder(oGuiElement, params, True, total) | |
| if not sGui: | |
| oGui.setView('tvshows') | |
| oGui.setEndOfDirectory() | |
| def showNewEpisodes(entryUrl=False, sGui=False): | |
| oGui = sGui if sGui else cGui() | |
| params = ParameterHandler() | |
| if not entryUrl: | |
| entryUrl = params.getValue('sUrl') | |
| oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False)) | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 4 # HTML cache time: 4 hours | |
| sHtmlContent = oRequest.request() | |
| pattern = r'<div[^>]*class="col-md-[^"]*"[^>]*>\s*<a[^>]*href="([^"]*)"[^>]*>\s*<strong>([^<]+)</strong>\s*<span[^>]*>([^<]+)</span>' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: | |
| if not sGui: oGui.showInfo() | |
| return | |
| aResult = sorted(aResult, key=lambda x: x[1].lower()) # Sort alphabetically by series name (case-insensitive) | |
| total = len(aResult) | |
| for sUrl, sName, sInfo in aResult: | |
| sMovieTitle = sName + ' ' + sInfo | |
| oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons') | |
| oGuiElement.setMediaType('tvshow') | |
| oGuiElement.setTitle(sMovieTitle) | |
| params.setParam('sUrl', URL_MAIN + sUrl) | |
| params.setParam('TVShowTitle', sMovieTitle) | |
| oGui.addFolder(oGuiElement, params, True, total) | |
| if not sGui: | |
| oGui.setView('tvshows') | |
| oGui.setEndOfDirectory() | |
| def showEntries(entryUrl=False, sGui=False): | |
| oGui = sGui if sGui else cGui() | |
| params = ParameterHandler() | |
| if not entryUrl: | |
| entryUrl = params.getValue('sUrl') | |
| oRequest = cRequestHandler(entryUrl, ignoreErrors=(sGui is not False)) | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 6 # HTML cache time: 6 hours | |
| sHtmlContent = oRequest.request() | |
| # Pattern structure: | |
| #'<div[^>]*class="col-md-[^"]*"[^>]*>.*?' # start element | |
| #'<a[^>]*href="([^"]*)"[^>]*>.*?' # url | |
| #'data-src="([^"]*).*?' # thumbnail | |
| #'<h3>(.*?)<span[^>]*class="paragraph-end">.*?' # title | |
| #'<\\/div>' # end element | |
| pattern = '<div[^>]*class="col-md-[^"]*"[^>]*>.*?<a[^>]*href="([^"]*)"[^>]*>.*?data-src="([^"]*).*?<h3>(.*?)<span[^>]*class="paragraph-end">.*?</div>' | |
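| # Illustrative match (markup assumed): the pattern above would turn | |
| # '<div class="col-md-3"><a href="/serie/foo"><img data-src="/img/foo.jpg"><h3>Foo<span class="paragraph-end"></span></h3></a></div>' | |
| # into the tuple ('/serie/foo', '/img/foo.jpg', 'Foo'). | |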
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: | |
| if not sGui: oGui.showInfo() | |
| return | |
| aResult = sorted(aResult, key=lambda x: x[2].lower()) # Sort alphabetically by name (case-insensitive) | |
| total = len(aResult) | |
| for sUrl, sThumbnail, sName in aResult: | |
| if sThumbnail.startswith('/'): | |
| sThumbnail = URL_MAIN + sThumbnail | |
| oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showSeasons') | |
| oGuiElement.setThumbnail(sThumbnail) | |
| oGuiElement.setMediaType('tvshow') | |
| params.setParam('sUrl', URL_MAIN + sUrl) | |
| params.setParam('TVShowTitle', sName) | |
| oGui.addFolder(oGuiElement, params, True, total) | |
| if not sGui: | |
| pattern = 'pagination">.*?<a href="([^"]+)">></a>.*?</a></div>' | |
| isMatchNextPage, sNextUrl = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if isMatchNextPage: | |
| params.setParam('sUrl', sNextUrl) | |
| oGui.addNextPage(SITE_IDENTIFIER, 'showEntries', params) | |
| oGui.setView('tvshows') | |
| oGui.setEndOfDirectory() | |
| def showSeasons(): | |
| params = ParameterHandler() | |
| sUrl = params.getValue('sUrl') | |
| sTVShowTitle = params.getValue('TVShowTitle') | |
| oRequest = cRequestHandler(sUrl) | |
| sHtmlContent = oRequest.request() | |
| pattern = '<div[^>]*class="hosterSiteDirectNav"[^>]*>.*?<ul>(.*?)<\\/ul>' | |
| isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if isMatch: | |
| pattern = '<a[^>]*href="([^"]*)"[^>]*title="([^"]*)"[^>]*>(.*?)</a>.*?' | |
| isMatch, aResult = cParser.parse(sContainer, pattern) | |
| if not isMatch: | |
| cGui().showInfo() | |
| return | |
| isDesc, sDesc = cParser.parseSingleResult(sHtmlContent, '<p[^>]*data-full-description="(.*?)"[^>]*>') | |
| isThumbnail, sThumbnail = cParser.parseSingleResult(sHtmlContent, '<div[^>]*class="seriesCoverBox"[^>]*>.*?data-src="([^"]*)"[^>]*>') | |
| if isThumbnail: | |
| if sThumbnail.startswith('/'): | |
| sThumbnail = URL_MAIN + sThumbnail | |
| total = len(aResult) | |
| for sUrl, sName, sNr in aResult: | |
| isMovie = sUrl.endswith('filme') | |
| if 'Alle Filme' in sName: | |
| sName = 'Filme' | |
| oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showEpisodes') | |
| oGuiElement.setMediaType('season' if not isMovie else 'movie') | |
| if isThumbnail: | |
| oGuiElement.setThumbnail(sThumbnail) | |
| if isDesc: | |
| oGuiElement.setDescription(sDesc) | |
| if not isMovie: | |
| oGuiElement.setTVShowTitle(sTVShowTitle) | |
| oGuiElement.setSeason(sNr) | |
| params.setParam('sSeason', sNr) | |
| params.setParam('sThumbnail', sThumbnail) | |
| params.setParam('sUrl', URL_MAIN + sUrl) | |
| cGui().addFolder(oGuiElement, params, True, total) | |
| cGui().setView('seasons') | |
| cGui().setEndOfDirectory() | |
| def showEpisodes(): | |
| params = ParameterHandler() | |
| sUrl = params.getValue('sUrl') | |
| sTVShowTitle = params.getValue('TVShowTitle') | |
| sSeason = params.getValue('sSeason') | |
| sThumbnail = params.getValue('sThumbnail') | |
| if not sSeason: | |
| sSeason = '0' | |
| isMovieList = sUrl.endswith('filme') | |
| oRequest = cRequestHandler(sUrl) | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 4 # HTML cache time: 4 hours | |
| sHtmlContent = oRequest.request() | |
| pattern = '<table[^>]*class="seasonEpisodesList"[^>]*>(.*?)</table>' | |
| isMatch, sContainer = cParser.parseSingleResult(sHtmlContent, pattern) | |
| if isMatch: | |
| if isMovieList: | |
| pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+)">\s([^<]+).*?<strong>([^<]+)' | |
| isMatch, aResult = cParser.parse(sContainer, pattern) | |
| if not isMatch: | |
| pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+)">\s([^<]+).*?<span>([^<]+)' | |
| isMatch, aResult = cParser.parse(sContainer, pattern) | |
| else: | |
| pattern = r'<tr[^>]*data-episode-season-id="(\d+).*?<a href="([^"]+).*?(?:<strong>(.*?)</strong>.*?)?(?:<span>(.*?)</span>.*?)?<' | |
| isMatch, aResult = cParser.parse(sContainer, pattern) | |
| if not isMatch: | |
| cGui().showInfo() | |
| return | |
| isDesc, sDesc = cParser.parseSingleResult(sHtmlContent, '<p[^>]*data-full-description="(.*?)"[^>]*>') | |
| total = len(aResult) | |
| for sID, sUrl2, sNameGer, sNameEng in aResult: | |
| sName = '%d - ' % int(sID) | |
| if isMovieList: | |
| sName += sNameGer + '- ' + sNameEng | |
| else: | |
| sName += sNameGer if sNameGer else sNameEng | |
| oGuiElement = cGuiElement(sName, SITE_IDENTIFIER, 'showHosters') | |
| oGuiElement.setMediaType('episode' if not isMovieList else 'movie') | |
| oGuiElement.setThumbnail(sThumbnail) | |
| if isDesc: | |
| oGuiElement.setDescription(sDesc) | |
| if not isMovieList: | |
| oGuiElement.setSeason(sSeason) | |
| oGuiElement.setEpisode(int(sID)) | |
| oGuiElement.setTVShowTitle(sTVShowTitle) | |
| params.setParam('sUrl', URL_MAIN + sUrl2) | |
| params.setParam('entryUrl', sUrl) | |
| cGui().addFolder(oGuiElement, params, False, total) | |
| cGui().setView('episodes' if not isMovieList else 'movies') | |
| cGui().setEndOfDirectory() | |
| def showHosters(): | |
| hosters = [] | |
| sUrl = ParameterHandler().getValue('sUrl') | |
| sHtmlContent = cRequestHandler(sUrl, caching=False).request() | |
| if cConfig().getSetting('plugin_' + SITE_IDENTIFIER + '.domain') == 'serienstream.stream': | |
| pattern = r'<li[^>]*episodeLink([^"]+)"\sdata-lang-key="([^"]+).*?data-link-target=([^"]+).*?<h4>([^<]+)<([^>]+)' | |
| pattern2 = r'itemprop="keywords".content=".*?Season...([^"]+).S.*?' # HD marker | |
| # data-lang-key="1" German | |
| # data-lang-key="2" English | |
| # data-lang-key="3" English with German subtitles | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| aResult2 = cParser.parse(sHtmlContent, pattern2) # read pattern 2 | |
| if isMatch: | |
| for sID, sLang, sUrl, sName, sQuality in aResult: | |
| sUrl = '/redirect/' + sID # net result of the original double replace() | |
| if cConfig().isBlockedHoster(sName)[0]: continue # skip hosters blocked in settings.xml or via a disabled resolver | |
| sLanguage = cConfig().getSetting('prefLanguage') | |
| if sLanguage == '1': # preferred language German in settings.xml | |
| if '2' in sLang: # data-lang-key="2" English | |
| continue | |
| if '3' in sLang: # data-lang-key="3" English with German subtitles | |
| continue | |
| if sLang == '1': # data-lang-key="1" German | |
| sLang = '(DE)' # display language German | |
| if sLanguage == '2': # preferred language English in settings.xml | |
| if '1' in sLang: # data-lang-key="1" German | |
| continue | |
| if '3' in sLang: # data-lang-key="3" English with German subtitles | |
| continue | |
| if sLang == '2': # data-lang-key="2" English | |
| sLang = '(EN)' # display language | |
| if sLanguage == '3': # preferred language Japanese in settings.xml | |
| cGui().showLanguage() # no entry available in the selected language | |
| continue | |
| if sLanguage == '0': # all languages | |
| if sLang == '1': # data-lang-key="1" German | |
| sLang = '(DE)' # display language German | |
| if sLang == '2': # data-lang-key="2" English | |
| sLang = '(EN)' # display language English | |
| elif sLang == '3': # data-lang-key="3" English with German subtitles | |
| sLang = '(EN) Sub: (DE)' # display language English with German subtitles | |
| if 'HD' in aResult2[1]: # check whether tuple aResult2 contains the HD marker, then override | |
| sQuality = '720' | |
| else: | |
| sQuality = '480' | |
| # From here on sName is carried along as well, e.g.: | |
| # from the log [serienstream]: ['/redirect/12286260', 'VOE'] | |
| # here sUrl = '/redirect/12286260' and sName = 'VOE' | |
| # hoster.py 194 | |
| hoster = {'link': [sUrl, sName], 'name': sName, 'displayedName': '%s [I]%s [%sp][/I]' % (sName, sLang, sQuality), 'quality': sQuality, 'languageCode': sLang} # language code for hoster.py language priority | |
| hosters.append(hoster) | |
| if hosters: | |
| hosters.append('getHosterUrl') | |
| if not hosters: | |
| cGui().showLanguage() | |
| return hosters | |
| else: | |
| pattern = r'<li[^>]*data-lang-key="([^"]+).*?data-link-target="([^"]+).*?<h4>([^<]+)<([^>]+)' | |
| pattern2 = r'itemprop="keywords".content=".*?Season...([^"]+).S.*?' # HD marker | |
| # data-lang-key="1" German | |
| # data-lang-key="2" English | |
| # data-lang-key="3" English with German subtitles | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| aResult2 = cParser.parse(sHtmlContent, pattern2) # read pattern 2 | |
| if isMatch: | |
| for sLang, sUrl, sName, sQuality in aResult: | |
| if cConfig().isBlockedHoster(sName)[0]: continue # skip hosters blocked in settings.xml or via a disabled resolver | |
| sLanguage = cConfig().getSetting('prefLanguage') | |
| if sLanguage == '1': # preferred language German in settings.xml | |
| if '2' in sLang: # data-lang-key="2" | |
| continue | |
| if '3' in sLang: # data-lang-key="3" | |
| continue | |
| if sLang == '1': # data-lang-key="1" | |
| sLang = '(DE)' # display language | |
| if sLanguage == '2': # preferred language English in settings.xml | |
| if '1' in sLang: # data-lang-key="1" | |
| continue | |
| if '3' in sLang: # data-lang-key="3" | |
| continue | |
| if sLang == '2': # data-lang-key="2" | |
| sLang = '(EN)' # display language | |
| if sLanguage == '3': # preferred language Japanese in settings.xml | |
| cGui().showLanguage() # no entry available in the selected language | |
| continue | |
| if sLanguage == '0': # all languages | |
| if sLang == '1': # data-lang-key="1" | |
| sLang = '(DE)' # display language | |
| if sLang == '2': # data-lang-key="2" | |
| sLang = '(EN)' # display language | |
| elif sLang == '3': # data-lang-key="3" | |
| sLang = '(EN) Sub: (DE)' # display language | |
| if 'HD' in aResult2[1]: # check whether tuple aResult2 contains the HD marker, then override | |
| sQuality = '720' | |
| else: | |
| sQuality = '480' | |
| # From here on sName is carried along as well, e.g.: | |
| # from the log [serienstream]: ['/redirect/12286260', 'VOE'] | |
| # here sUrl = '/redirect/12286260' and sName = 'VOE' | |
| # hoster.py 194 | |
| hoster = {'link': [sUrl, sName], 'name': sName, 'displayedName': '%s [I]%s [%sp][/I]' % (sName, sLang, sQuality), 'quality': sQuality, 'languageCode': sLang} # language code for hoster.py language priority | |
| hosters.append(hoster) | |
| if hosters: | |
| hosters.append('getHosterUrl') | |
| if not hosters: | |
| cGui().showLanguage() | |
| return hosters | |
| def getHosterUrl(hUrl): | |
| if isinstance(hUrl, str): | |
| from ast import literal_eval | |
| hUrl = literal_eval(hUrl) # safer than eval() for the "[url, name]" string | |
| username = cConfig().getSetting('serienstream.user') | |
| password = cConfig().getSetting('serienstream.pass') | |
| Handler = cRequestHandler(URL_LOGIN, caching=False) | |
| Handler.addHeaderEntry('Upgrade-Insecure-Requests', '1') | |
| Handler.addHeaderEntry('Referer', ParameterHandler().getValue('entryUrl')) | |
| Handler.addParameters('email', username) | |
| Handler.addParameters('password', password) | |
| Handler.request() | |
| Request = cRequestHandler(URL_MAIN + hUrl[0], caching=False) | |
| Request.addHeaderEntry('Referer', ParameterHandler().getValue('entryUrl')) | |
| Request.addHeaderEntry('Upgrade-Insecure-Requests', '1') | |
| Request.request() | |
| sUrl = Request.getRealUrl() | |
| if 'voe' in hUrl[1].lower(): | |
| isBlocked, sDomain = cConfig().isBlockedHoster(sUrl) # the function returns 2 values! | |
| if isBlocked: # Voe pseudo-domain not known to resolveUrl | |
| sUrl = sUrl.replace(sDomain, 'voe.sx') | |
| return [{'streamUrl': sUrl, 'resolved': False}] | |
| return [{'streamUrl': sUrl, 'resolved': False}] | |
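| # Illustrative call (values taken from the log example above): | |
| # getHosterUrl("['/redirect/12286260', 'VOE']") logs in, follows | |
| # URL_MAIN + '/redirect/12286260' and returns [{'streamUrl': <final hoster URL>, 'resolved': False}] | |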
| def showSearch(): | |
| sSearchText = cGui().showKeyBoard(sHeading=cConfig().getLocalizedString(30281)) | |
| if not sSearchText: return | |
| _search(False, sSearchText) | |
| cGui().setEndOfDirectory() | |
| def _search(oGui, sSearchText): | |
| SSsearch(oGui, sSearchText) | |
| def SSsearch(sGui=False, sSearchText=False): | |
| oGui = sGui if sGui else cGui() | |
| params = ParameterHandler() | |
| params.getValue('sSearchText') | |
| oRequest = cRequestHandler(URL_SERIES, caching=True, ignoreErrors=(sGui is not False)) | |
| oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest') | |
| oRequest.addHeaderEntry('Referer', REFERER + '/serien') | |
| oRequest.addHeaderEntry('Origin', REFERER) | |
| oRequest.addHeaderEntry('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') | |
| oRequest.addHeaderEntry('Upgrade-Insecure-Requests', '1') | |
| if cConfig().getSetting('global_search_' + SITE_IDENTIFIER) == 'true': | |
| oRequest.cacheTime = 60 * 60 * 24 # HTML Cache Zeit 1 Tag | |
| sHtmlContent = oRequest.request() | |
| if not sHtmlContent: | |
| return | |
| sst = sSearchText.lower() | |
| pattern = r'<li><a data.+?href="([^"]+)".+?">(.*?)</a></l' # link - title | |
| aResult = cParser.parse(sHtmlContent, pattern) | |
| if not aResult[0]: | |
| oGui.showInfo() | |
| return | |
| results = sorted(aResult[1], key=lambda x: x[1].lower()) # Sort alphabetically by title (case-insensitive) | |
| total = len(results) | |
| for link, title in results: | |
| titleLow = title.lower() | |
| if not sst in titleLow and not cUtil.isSimilarByToken(sst, titleLow): | |
| continue | |
| else: | |
| # get thumbnail / description per call (optional) | |
| try: | |
| sThumbnail, sDescription = getMetaInfo(link, title) | |
| oGuiElement = cGuiElement(title, SITE_IDENTIFIER, 'showSeasons') | |
| oGuiElement.setThumbnail(URL_MAIN + sThumbnail) | |
| oGuiElement.setDescription(sDescription) | |
| oGuiElement.setTVShowTitle(title) | |
| oGuiElement.setMediaType('tvshow') | |
| params.setParam('sUrl', URL_MAIN + link) | |
| params.setParam('sName', title) | |
| oGui.addFolder(oGuiElement, params, True, total) | |
| except Exception: | |
| oGuiElement = cGuiElement(title, SITE_IDENTIFIER, 'showSeasons') | |
| oGuiElement.setTVShowTitle(title) | |
| oGuiElement.setMediaType('tvshow') | |
| params.setParam('sUrl', URL_MAIN + link) | |
| params.setParam('sName', title) | |
| oGui.addFolder(oGuiElement, params, True, total) | |
| if not sGui: | |
| oGui.setView('tvshows') | |
| def getMetaInfo(link, title): # sets metadata in the search results: | |
| oGui = cGui() | |
| oRequest = cRequestHandler(URL_MAIN + link, caching=False) | |
| oRequest.addHeaderEntry('X-Requested-With', 'XMLHttpRequest') | |
| oRequest.addHeaderEntry('Referer', REFERER + '/serien') | |
| oRequest.addHeaderEntry('Origin', REFERER) | |
| #GET CONTENT OF HTML | |
| sHtmlContent = oRequest.request() | |
| if not sHtmlContent: | |
| return | |
| pattern = 'seriesCoverBox">.*?data-src="([^"]+).*?data-full-description="([^"]+)"' #img , descr | |
| aResult = cParser.parse(sHtmlContent, pattern) | |
| if not aResult[0]: | |
| return | |
| for sImg, sDescr in aResult[1]: | |
| return sImg, sDescr |
| # 2022-07-19 | |
| import base64 | |
| import hashlib | |
| import re | |
| import requests | |
| from resources.lib.control import urlparse, py2_encode | |
| import pyaes | |
| from scrapers.modules import cleantitle | |
| from resources.lib import log_utils | |
| try: | |
| import HTMLParser | |
| unescape = HTMLParser.HTMLParser().unescape | |
| except: | |
| from html import unescape | |
| RES_4K = ['4k', 'hd4k', '4khd', 'uhd', 'ultrahd', 'ultra-hd', '2160', '2160p', '2160i', 'hd2160', '2160hd', | |
| '1716p', '1716i', 'hd1716', '1716hd', '2664p', '2664i', 'hd2664', '2664hd', '3112p', | |
| '3112i', 'hd3112', '3112hd', '2880p', '2880i', 'hd2880', '2880hd'] | |
| RES_1080 = ['1080', '1080p', '1080i', 'hd1080', '1080hd', '1200p', '1200i', 'hd1200', '1200hd'] | |
| RES_720 = ['720', '720p', '720i', 'hd720', '720hd', 'hdtv', '.hd.'] | |
| RES_SD = ['576', '576p', '576i', 'sd576', '576sd', '480', '480p', '480i', 'sd480', '480sd', '360', '360p', | |
| '360i', 'sd360', '360sd', '240', '240p', '240i', 'sd240', '240sd'] | |
| SCR = ['dvdscr', 'screener', '.scr.', 'r5', 'r6'] | |
| CAM = ['camrip', 'cam.rip', 'tsrip', '.ts.rip.', 'dvdcam', 'dvd.cam', 'dvdts', 'dvd.ts.', 'cam', 'telesync', 'tele.sync'] | |
| HDCAM = ['hdcam', '.hd.cam.', 'hdts', '.hd.ts.', '.hdtc.', '.hd.tc.'] | |
| CODEC_H265 = ['hevc', 'h265', 'h.265', 'x265', 'x.265'] | |
| CODEC_H264 = ['avc', 'h264', 'h.264', 'x264', 'x.264'] | |
| CODEC_XVID = ['xvid', 'x.vid', 'x-vid'] | |
| CODEC_DIVX = ['divx', 'divx ', 'div2', 'div2 ', 'div3'] | |
| CODEC_MPEG = ['mp4', 'mpeg', 'm4v', 'mpg', 'mpg1', 'mpg2', 'mpg3', 'mpg4', 'mp4 ', 'msmpeg', 'msmpeg4', | |
| 'mpegurl'] | |
| CODEC_AVI = ['avi'] | |
| CODEC_MKV = ['mkv', '.mkv', 'matroska'] | |
| AUDIO_8CH = ['ch8', '8ch', '7.1', '7-1'] | |
| AUDIO_7CH = ['ch7', '7ch', '6.1', '6-1'] | |
| AUDIO_6CH = ['ch6', '6ch', '5.1', '5-1'] | |
| AUDIO_2CH = ['ch2', '2ch', '2.0', 'stereo'] | |
| AUDIO_1CH = ['ch1', '1ch', 'mono', 'monoaudio'] | |
| VIDEO_3D = ['3d', 'sbs', 'hsbs', 'sidebyside', 'side.by.side', 'stereoscopic', 'tab', 'htab', 'topandbottom', | |
| 'top.and.bottom'] | |
| MULTI_LANG = ['hindi.eng', 'ara.eng', 'ces.eng', 'chi.eng', 'cze.eng', 'dan.eng', 'dut.eng', 'ell.eng', 'esl.eng', | |
| 'esp.eng', 'fin.eng', 'fra.eng', 'fre.eng', 'frn.eng', 'gai.eng', 'ger.eng', 'gle.eng', 'gre.eng', | |
| 'gtm.eng', 'heb.eng', 'hin.eng', 'hun.eng', 'ind.eng', 'iri.eng', 'ita.eng', 'jap.eng', 'jpn.eng', 'kor.eng', | |
| 'lat.eng', 'lebb.eng', 'lit.eng', 'nor.eng', 'pol.eng', 'por.eng', 'rus.eng', 'som.eng', 'spa.eng', 'sve.eng', | |
| 'swe.eng', 'tha.eng', 'tur.eng', 'uae.eng', 'ukr.eng', 'vie.eng', 'zho.eng', 'dual.audio', 'multi'] | |
| LANG = ['arabic', 'bgaudio', 'dutch', 'finnish', 'french', 'german', 'greek', 'italian', 'latino', 'polish', 'portuguese', | |
| 'russian', 'spanish', 'truefrech', 'truespanish', 'turkish', 'hebrew'] | |
| UNDESIREABLES = ['alexfilm', 'baibako', 'coldfilm', 'eniahd', 'extras.only', 'gears media', 'jaskier', 'hamsterstudio', | |
| 'ideafilm', 'kerob', 'lakefilm', 'lostfilm', 'newstudio', 'profix media', 'sample', 'soundtrack', 'teaser', 'vostfr'] | |
| DUBBED = ['dublado', 'dubbed'] | |
| SUBS = ['subs', 'subtitula', 'subfrench', 'subspanish', 'swesub'] | |
| ADDS = ['1xbet', 'betwin'] | |
| def get_qual(term): | |
| if any(i in term for i in RES_4K): | |
| return '4K' | |
| elif any(i in term for i in RES_1080): | |
| return '1080p' | |
| elif any(i in term for i in RES_720): | |
| return '720p' | |
| elif any(i in term for i in RES_SD): | |
| return 'SD' | |
| elif any(i in term for i in SCR): | |
| return 'SCR' | |
| elif any(i in term for i in CAM): | |
| return 'CAM' | |
| elif any(i in term for i in HDCAM): | |
| return 'CAM' | |
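| # Usage sketch: | |
| # get_qual('show.s01e01.2160p.web') -> '4K' | |
| # get_qual('movie.1080p.bluray') -> '1080p' | |
| # get_qual('movie.bluray.x264') -> None (callers fall back to 'SD') | |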
| # def is_anime(content, type, type_id): | |
| # from openscrapers.modules import trakt | |
| # try: | |
| # r = trakt.getGenre(content, type, type_id) | |
| # return 'anime' in r or 'animation' in r | |
| # except: | |
| # return False | |
| def get_release_quality(release_name, release_link=None): | |
| if release_name is None: | |
| return | |
| try: | |
| release_name = py2_encode(release_name) | |
| except: | |
| pass | |
| try: | |
| quality = None | |
| release_name = release_name.upper() | |
| fmt = re.sub(r'(.+)(\d{4}|S\d+E\d+)(\.|\)\.|\)|\]\.|\]|\s)', '', release_name) | |
| # log_utils.log('fmt = %s' % fmt, log_utils.LOGDEBUG) | |
| fmt = fmt.lower() | |
| quality = get_qual(fmt) | |
| if not quality: | |
| if release_link: | |
| release_link = release_link.lower() | |
| try: | |
| release_link = py2_encode(release_link) | |
| except: | |
| pass | |
| quality = get_qual(release_link) | |
| if not quality: | |
| quality = 'SD' | |
| else: | |
| quality = 'SD' | |
| info = [] | |
| if any(value in fmt for value in VIDEO_3D): | |
| info.append('3D') | |
| if any(value in fmt for value in CODEC_H265): | |
| info.append('HEVC') | |
| return quality, info | |
| except: | |
| log_utils.error() | |
| return 'SD', [] | |
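| # Usage sketch: the leading title up to the year/episode tag is stripped first, | |
| # so resolution tokens are only read from the tail of the release name: | |
| # get_release_quality('Movie.Title.2019.1080p.BluRay.x264-GRP') -> ('1080p', []) | |
| # get_release_quality('Show.S01E02.720p.HEVC.WEB') -> ('720p', ['HEVC']) | |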
| def getFileType(url): | |
| try: | |
| url = url.lower() | |
| url = url.replace(' ', '.') | |
| except: | |
| url = str(url) | |
| type = '' | |
| if any(value in url for value in ['bluray', 'blu-ray', 'blu.ray']): | |
| type += ' BLURAY /' | |
| if any(value in url for value in ['bd-r', 'bd.r', 'bdr', 'bd-rip', 'bd.rip', 'bdrip', 'brrip', 'br.rip']): | |
| type += ' BR-RIP /' | |
| if 'remux' in url: | |
| type += ' REMUX /' | |
| if any(i in url for i in ['dvd-rip', 'dvd.rip', 'dvdrip']): | |
| type += ' DVD /' | |
| if any(value in url for value in ['web-dl', 'web.dl', 'webdl', 'web-rip', 'web.rip', 'webrip']): | |
| type += ' WEB /' | |
| if 'hdtv' in url: | |
| type += ' HDTV /' | |
| if 'sdtv' in url: | |
| type += ' SDTV /' | |
| if any(value in url for value in ['hd-rip', 'hd.rip', 'hdrip']): | |
| type += ' HDRIP /' | |
| if 'hdr.' in url: | |
| type += ' HDR /' | |
| if any(value in url for value in ['dd5.1', 'dd-5.1', 'dolby-digital', 'dolby.digital']): | |
| type += ' DOLBYDIGITAL /' | |
| if any(value in url for value in ['.ddex', 'dd-ex', 'dolby-ex', 'dolby.digital.ex']): | |
| type += ' DD-EX /' | |
| if any(value in url for value in ['dolby-digital-plus', 'dolby.digital.plus', 'ddplus', 'dd-plus']): | |
| type += ' DD+ /' | |
| if any(value in url for value in ['true-hd', 'truehd', '.ddhd']): | |
| type += ' DOLBY-TRUEHD /' | |
| if 'atmos' in url: | |
| type += ' ATMOS /' | |
| if '.dts.' in url: | |
| type += ' DTS /' | |
| if any(value in url for value in ['dts-hd', 'dtshd', 'dts.hd']): | |
| type += ' DTS-HD /' | |
| if any(value in url for value in ['dts-es', 'dtses', 'dts.es']): | |
| type += ' DTS-ES /' | |
| if any(value in url for value in ['dts-neo', 'dtsneo', 'dts.neo']): | |
| type += ' DTS-NEO /' | |
| if '.thx.' in url: | |
| type += ' THX /' | |
| if any(value in url for value in ['.thx-ex', 'thxex']): | |
| type += ' THX-EX /' | |
| if any(value in url for value in AUDIO_8CH): | |
| type += ' 8CH /' | |
| if any(value in url for value in AUDIO_7CH): | |
| type += ' 7CH /' | |
| if any(value in url for value in AUDIO_6CH): | |
| type += ' 6CH /' | |
| if 'xvid' in url: | |
| type += ' XVID /' | |
| if 'divx' in url: | |
| type += ' DIVX /' | |
| if any(value in url for value in CODEC_MPEG): | |
| type += ' MPEG /' | |
| if '.avi' in url: | |
| type += ' AVI /' | |
| if 'ac3' in url: | |
| type += ' AC3 /' | |
| if any(value in url for value in CODEC_H264): | |
| type += ' X264 /' | |
| if any(value in url for value in CODEC_H265): | |
| type += ' X265 /' | |
| if any(value in url for value in CODEC_MKV): | |
| type += ' MKV /' | |
| if any(value in url for value in HDCAM): | |
| type += ' HDCAM /' | |
| if any(value in url for value in MULTI_LANG): | |
| type += ' MULTI-LANG /' | |
| if any(value in url for value in ADDS): | |
| type += ' ADDS /' | |
| if any(value in url for value in SUBS): | |
| if type != '': | |
| type += ' WITH SUBS' | |
| else: | |
| type = 'SUBS' | |
| type = type.rstrip('/') | |
| return type | |
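| # Usage sketch: | |
| # getFileType('Movie.2019.1080p.BluRay.DTS.x264.mkv') | |
| # -> roughly 'BLURAY / DTS / X264 / MKV' (the resolution itself is handled by get_qual) | |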
| def check_url(url): | |
| try: | |
| url = url.lower() | |
| try: | |
| url = py2_encode(url) | |
| except: | |
| pass | |
| quality = get_qual(url) | |
| if not quality: | |
| quality = 'SD' | |
| return quality | |
| except: | |
| log_utils.error() | |
| return 'SD' | |
| def check_title(title, name, hdlr, year): | |
| try: | |
| match = True | |
| title = title.replace('!', '') | |
| n = name.lower() | |
| h = hdlr.lower() | |
| t = n.split(h)[0].replace(year, '').replace('(', '').replace(')', '').replace('&', 'and').replace('.us.', '.') | |
| # log_utils.log('cleantitle.get(t) = %s' % cleantitle.get(t), log_utils.LOGDEBUG) | |
| # log_utils.log('cleantitle.get(title) = %s' % cleantitle.get(title), log_utils.LOGDEBUG) | |
| if cleantitle.get(t) != cleantitle.get(title): | |
| match = False | |
| if h not in n: | |
| match = False | |
| return match | |
| except: | |
| log_utils.error() | |
| return False | |
| def label_to_quality(label): | |
| try: | |
| try: | |
| label = int(re.search(r'(\d+)', label).group(1)) | |
| except: | |
| label = 0 | |
| if label >= 2160: | |
| return '4K' | |
| elif 1920 <= label: | |
| return '1080p' | |
| elif 1280 <= label: | |
| return '720p' | |
| else: # also covers labels between 577 and 1279 | |
| return 'SD' | |
| except: | |
| log_utils.error() | |
| return 'SD' | |
| def strip_domain(url): | |
| try: | |
| if url.lower().startswith('http') or url.startswith('/'): | |
| url = re.findall(r'(?://.+?|)(/.+)', url)[0] | |
| url = replaceHTMLCodes(url) | |
| url = py2_encode(url) | |
| return url | |
| except: | |
| log_utils.error() | |
| return | |
| def replaceHTMLCodes(txt): | |
| # Some HTML entities are encoded twice. Decode double. | |
| return _replaceHTMLCodes(_replaceHTMLCodes(txt)) | |
| def _replaceHTMLCodes(txt): | |
| txt = re.sub(r"(&#[0-9]+)([^;^0-9]+)", r"\1;\2", txt) | |
| txt = unescape(txt) | |
| txt = txt.replace("&quot;", "\"") | |
| txt = txt.replace("&amp;", "&") | |
| txt = txt.strip() | |
| return txt | |
| def is_host_valid(url, domains): | |
| try: | |
| if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or any( | |
| url.lower().endswith(x) for x in ['.rar', '.zip', '.iso']): | |
| return False, '' | |
| host = __top_domain(url) | |
| hosts = [domain.lower() for domain in domains if host and host in domain.lower()] | |
| if hosts and '.' not in host: | |
| host = hosts[0] | |
| if hosts and any([h for h in ['google', 'picasa', 'blogspot'] if h in host]): | |
| host = 'gvideo' | |
| if hosts and any([h for h in ['akamaized', 'ocloud'] if h in host]): | |
| host = 'CDN' | |
| return any(hosts), host | |
| except: | |
| log_utils.error() | |
| return False, '' | |
| def __top_domain(url): | |
| elements = urlparse(url) | |
| domain = elements.netloc or elements.path | |
| domain = domain.split('@')[-1].split(':')[0] | |
| regex = r"(?:www\.)?([\w\-]*\.[\w\-]{2,3}(?:\.[\w\-]{2,3})?)$" | |
| res = re.search(regex, domain) | |
| if res: | |
| domain = res.group(1) | |
| domain = domain.lower() | |
| return domain | |
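| # Usage sketch: | |
| # __top_domain('https://www.voe.sx/e/abcdef') -> 'voe.sx' | |
| # __top_domain('http://user@streamtape.com:443') -> 'streamtape.com' | |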
| def aliases_to_array(aliases, filter=None): | |
| try: | |
| if not filter: | |
| filter = [] | |
| if isinstance(filter, str): | |
| filter = [filter] | |
| return [x.get('title') for x in aliases if not filter or x.get('country') in filter] | |
| except: | |
| log_utils.error() | |
| return [] | |
| # def _size(siz): | |
| # if siz in ['0', 0, '', None]: return 0, '' | |
| # div = 1 if siz.lower().endswith(('gb', 'gib')) else 1024 | |
| # float_size = float(re.sub('[^0-9|/.|/,]', '', siz.replace(',', ''))) / div | |
| # str_size = '%.2f GB' % float_size | |
| # return float_size, str_size | |
| # def get_size(url): # not called | |
| # try: | |
| # size = client.request(url, output='file_size') | |
| # if size == '0': | |
| # size = False | |
| # float_size, str_size = convert_size(size) | |
| # return float_size, str_size | |
| # except: | |
| # log_utils.error() | |
| # return False | |
| def convert_size(size_bytes, to='GB'): | |
| try: | |
| import math | |
| if size_bytes == 0: | |
| return 0, '' | |
| power = {'B' : 0, 'KB': 1, 'MB' : 2, 'GB': 3, 'TB' : 4, 'EB' : 5, 'ZB' : 6, 'YB': 7} | |
| i = power[to] | |
| p = math.pow(1024, i) | |
| float_size = round(size_bytes / p, 2) | |
| # if to == 'B' or to == 'KB': | |
| # return 0, '' | |
| str_size = "%s %s" % (float_size, to) | |
| return float_size, str_size | |
| except: | |
| log_utils.error() | |
| return 0, '' | |
| def check_directstreams(url, hoster='', quality='SD'): | |
| urls = [] | |
| host = hoster | |
| # if 'google' in url or any(x in url for x in ['youtube.', 'docid=']): | |
| # urls = directstream.google(url) | |
| # if not urls: | |
| # tag = directstream.googletag(url) | |
| # if tag: | |
| # urls = [{'quality': tag[0]['quality'], 'url': url}] | |
| # if urls: | |
| # host = 'gvideo' | |
| # elif 'ok.ru' in url: | |
| # urls = directstream.odnoklassniki(url) | |
| # if urls: | |
| # host = 'vk' | |
| # elif 'vk.com' in url: | |
| # urls = directstream.vk(url) | |
| # if urls: | |
| # host = 'vk' | |
| if any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']): | |
| urls = [{'url': url}] | |
| if urls: host = 'CDN' | |
| direct = True if urls else False | |
| if not urls: | |
| urls = [{'quality': quality, 'url': url}] | |
| return urls, host, direct | |
| def evp_decode(cipher_text, passphrase, salt=None): | |
| cipher_text = base64.b64decode(cipher_text) | |
| if not salt: | |
| salt = cipher_text[8:16] | |
| cipher_text = cipher_text[16:] | |
| data = evpKDF(passphrase, salt) | |
| decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(data['key'], data['iv'])) | |
| plain_text = decrypter.feed(cipher_text) | |
| plain_text += decrypter.feed() | |
| return plain_text | |
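| # Note: this assumes the CryptoJS / "openssl enc" container layout: base64 of | |
| # b'Salted__' + 8-byte salt + ciphertext, with key/IV derived via evpKDF below. | |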
| def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"): | |
| target_key_size = key_size + iv_size | |
| derived_bytes = b"" | |
| number_of_derived_words = 0 | |
| block = None | |
| hasher = hashlib.new(hash_algorithm) | |
| while number_of_derived_words < target_key_size: | |
| if block is not None: | |
| hasher.update(block) | |
| hasher.update(passwd) | |
| hasher.update(salt) | |
| block = hasher.digest() | |
| hasher = hashlib.new(hash_algorithm) | |
| for _i in range(1, iterations): | |
| hasher.update(block) | |
| block = hasher.digest() | |
| hasher = hashlib.new(hash_algorithm) | |
| derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)] | |
| number_of_derived_words += len(block) // 4 | |
| return {"key": derived_bytes[0: key_size * 4], "iv": derived_bytes[key_size * 4:]} | |
| def remove_lang(name): | |
| try: | |
| name = name.lower() | |
| name = name.replace(' ', '.') | |
| except: | |
| name = str(name) | |
| if any(value in name for value in LANG): | |
| return True | |
| elif any(value in name for value in UNDESIREABLES): | |
| return True | |
| elif any(value in name for value in DUBBED): | |
| return True | |
| elif 'rus' in name and 'eng' not in name.lower(): | |
| return True | |
| else: | |
| return False | |
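| # Usage sketch (True means: drop this release): | |
| # remove_lang('Movie.2019.French.1080p') -> True | |
| # remove_lang('Movie.2019.1080p.BluRay') -> False | |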
| def scraper_error(provider): | |
| import traceback | |
| failure = traceback.format_exc() | |
| log_utils.log(provider.upper() + ' - Exception: \n' + str(failure), log_utils.LOGDEBUG) | |
| def timeIt(func): | |
| import time | |
| fnc_name = func.__name__ | |
| def wrap(*args, **kwargs): | |
| started_at = time.time() | |
| result = func(*args, **kwargs) | |
| log_utils.log('%s.%s = %s' % (__name__ , fnc_name, time.time() - started_at), log_utils.LOGDEBUG) | |
| return result | |
| return wrap | |
| #ka | |
| def get_titles_for_search(title, localtitle, aliases): | |
| try: | |
| titles = [] | |
| if "country':" in str(aliases): aliases = aliases_to_array(aliases) | |
| if localtitle != '': titles.append(localtitle) | |
| if title != ''and title.lower() != localtitle.lower(): titles.append(title) | |
| [titles.append(i) for i in aliases if i.lower() != title.lower() and i.lower() != localtitle.lower() and i != ''] | |
| #titles = [str(i) for i in titles if all(ord(c) < 128 for c in i)] | |
| titles = [item for i, item in enumerate(titles) if item not in titles[:i]] | |
| return titles | |
| except: | |
| return [] | |
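| # Usage sketch: builds a deduplicated search-title list, local title first: | |
| # get_titles_for_search('The Boss', 'Der Boss', ['The Boss', 'El Jefe']) | |
| # -> ['Der Boss', 'The Boss', 'El Jefe'] | |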
| def check_302(url): | |
| try: | |
| from resources.lib.requestHandler import cRequestHandler | |
| while True: | |
| oRequest = cRequestHandler(url, caching=False) | |
| oRequest.request() | |
| status_code = int(oRequest.getStatus()) | |
| if 300 <= status_code < 400: # 3xx only; 400 falls through to the error branch | |
| url = oRequest.getRealUrl() | |
| elif 403 == status_code: | |
| return url | |
| elif 400 <= status_code: | |
| return | |
| elif 200 == status_code: | |
| return url | |
| else: | |
| break | |
| return | |
| except: | |
| return |
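| # Summary of the loop above: 3xx redirects are followed manually; 200 and 403 | |
| # (assumed: reachable but protected page) return the final URL, other 4xx/5xx return None. | |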
| # edit 2025-06-12 | |
| import sys | |
| import re, json, random, time | |
| from concurrent.futures import ThreadPoolExecutor | |
| from resources.lib import log_utils, utils, control | |
| from resources.lib.control import py2_decode, py2_encode, quote_plus, parse_qsl | |
| import resolveurl as resolver | |
| # from functools import reduce | |
| from resources.lib.control import getKodiVersion | |
| if int(getKodiVersion()) >= 20: from infotagger.listitem import ListItemInfoTag | |
| # for self.sysmeta - for later use as meta | |
| _params = dict(parse_qsl(sys.argv[2].replace('?',''))) if len(sys.argv) > 1 else dict() | |
| class sources: | |
| def __init__(self): | |
| self.getConstants() | |
| self.sources = [] | |
| self.current = int(time.time()) | |
| if 'sysmeta' in _params: self.sysmeta = _params['sysmeta'] # string for later use as meta | |
| self.watcher = False | |
| self.executor = ThreadPoolExecutor(max_workers=20) | |
| self.url = None | |
| def get(self, params): | |
| data = json.loads(params['sysmeta']) | |
| self.mediatype = data.get('mediatype') | |
| self.aliases = data.get('aliases') if 'aliases' in data else [] | |
| title = py2_encode(data.get('title')) | |
| originaltitle = py2_encode(data.get('originaltitle')) if 'originaltitle' in data else title | |
| year = data.get('year') if 'year' in data else None | |
| imdb = data.get('imdb_id') if 'imdb_id' in data else data.get('imdbnumber') if 'imdbnumber' in data else None | |
| if not imdb and 'imdb' in data: imdb = data.get('imdb') | |
| tmdb = data.get('tmdb_id') if 'tmdb_id' in data else None | |
| #if tmdb and not imdb: print('hallo') #TODO | |
| season = data.get('season') if 'season' in data else 0 | |
| episode = data.get('episode') if 'episode' in data else 0 | |
| premiered = data.get('premiered') if 'premiered' in data else None | |
| meta = params['sysmeta'] | |
| select = data.get('select') if 'select' in data else None | |
| return title, year, imdb, season, episode, originaltitle, premiered, meta, select | |
| def play(self, params): | |
| title, year, imdb, season, episode, originaltitle, premiered, meta, select = self.get(params) | |
| try: | |
| url = None | |
| # list of the streams found | |
| items = self.getSources(title, year, imdb, season, episode, originaltitle, premiered) | |
| select = control.getSetting('hosts.mode') if select is None else select | |
| ## unnecessary | |
| #select = '1' if control.getSetting('downloads') == 'true' and not (control.getSetting('download.movie.path') == '' or control.getSetting('download.tv.path') == '') else select | |
| # # TODO check what this was originally intended for | |
| # if control.window.getProperty('PseudoTVRunning') == 'True': | |
| # return control.resolveUrl(int(sys.argv[1]), True, control.item(path=str(self.sourcesDirect(items)))) | |
| if len(items) > 0: | |
| # selection: directory | |
| if select == '1' and 'plugin' in control.infoLabel('Container.PluginName'): | |
| control.window.clearProperty(self.itemsProperty) | |
| control.window.setProperty(self.itemsProperty, json.dumps(items)) | |
| control.window.clearProperty(self.metaProperty) | |
| control.window.setProperty(self.metaProperty, meta) | |
| control.sleep(2) | |
| return control.execute('Container.Update(%s?action=addItem&title=%s)' % (sys.argv[0], quote_plus(title))) | |
| # selection: dialog | |
| elif select == '0' or select == '1': | |
| url = self.sourcesDialog(items) | |
| if url == 'close://': return | |
| # Autoplay | |
| else: | |
| url = self.sourcesDirect(items) | |
| if url == None: return self.errorForSources() | |
| try: meta = json.loads(meta) | |
| except: pass | |
| from resources.lib.player import player | |
| player().run(title, url, meta) | |
| except Exception as e: | |
| log_utils.log('Error %s' % str(e), log_utils.LOGERROR) | |
| # list of found streams: index site | hoster | |
| def addItem(self, title): | |
| control.playlist.clear() | |
| items = control.window.getProperty(self.itemsProperty) | |
| items = json.loads(items) | |
| if items is None or len(items) == 0: control.idle() ; sys.exit() | |
| sysaddon = sys.argv[0] | |
| syshandle = int(sys.argv[1]) | |
| systitle = sysname = quote_plus(title) | |
| meta = control.window.getProperty(self.metaProperty) | |
| meta = json.loads(meta) | |
| #TODO | |
| if meta['mediatype'] == 'movie': | |
| # downloads = True if control.getSetting('downloads') == 'true' and control.exists(control.translatePath(control.getSetting('download.movie.path'))) else False | |
| downloads = True if control.getSetting('downloads') == 'true' and control.getSetting('download.movie.path') else False | |
| else: | |
| # downloads = True if control.getSetting('downloads') == 'true' and control.exists(control.translatePath(control.getSetting('download.tv.path'))) else False | |
| downloads = True if control.getSetting('downloads') == 'true' and control.getSetting('download.tv.path') else False | |
| addonPoster, addonBanner = control.addonPoster(), control.addonBanner() | |
| addonFanart, settingFanart = control.addonFanart(), control.getSetting('fanart') | |
| if 'backdrop_url' in meta and 'http' in meta['backdrop_url']: fanart = meta['backdrop_url'] | |
| elif 'fanart' in meta and 'http' in meta['fanart']: fanart = meta['fanart'] | |
| else: fanart = addonFanart | |
| if 'cover_url' in meta and 'http' in meta['cover_url']: poster = meta['cover_url'] | |
| elif 'poster' in meta and 'http' in meta['poster']: poster = meta['poster'] | |
| else: poster = addonPoster | |
| sysimage = poster | |
| if 'season' in meta and 'episode' in meta: | |
| sysname += quote_plus(' S%02dE%02d' % (int(meta['season']), int(meta['episode']))) | |
| elif 'year' in meta: | |
| sysname += quote_plus(' (%s)' % meta['year']) | |
| for i in range(len(items)): | |
| try: | |
| label = items[i]['label'] | |
| syssource = quote_plus(json.dumps([items[i]])) | |
| item = control.item(label=label, offscreen=True) | |
| item.setProperty('IsPlayable', 'true') | |
| item.setArt({'poster': poster, 'banner': addonBanner}) | |
| if settingFanart == 'true': item.setProperty('Fanart_Image', fanart) | |
| cm = [] | |
| if downloads: | |
| cm.append(("Download", 'RunPlugin(%s?action=download&name=%s&image=%s&source=%s)' % (sysaddon, sysname, sysimage, syssource))) | |
| cm.append(('Einstellungen', 'RunPlugin(%s?action=addonSettings)' % sysaddon)) | |
| item.addContextMenuItems(cm) | |
| url = "%s?action=playItem&title=%s&source=%s" % (sysaddon, systitle, syssource) | |
| # ## Required for library exports ## | |
| # ## Amazon Scraper Details ## | |
| # if "amazon" in label.lower(): | |
| # aid = re.search(r'asin%3D(.*?)%22%2C', url) | |
| # url = "plugin://plugin.video.amazon-test/?mode=PlayVideo&asin=" + aid.group(1) | |
| ## https://codedocs.xyz/AlwinEsch/kodi/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14 | |
| name = '%s%sStaffel: %s Episode: %s' % (title, "\n", meta['season'], meta['episode']) if 'season' in meta else title | |
| plot = meta['plot'] if 'plot' in meta and len(meta['plot'].strip()) >= 1 else '' | |
| plot = '[COLOR blue]%s[/COLOR]%s%s' % (name, "\n\n", py2_encode(plot)) | |
| if 'duration' in meta: | |
| infolable = {'plot': plot,'duration': meta['duration']} | |
| else: | |
| infolable = {'plot': plot} | |
| # TODO | |
| # if 'cast' in meta and meta['cast']: item.setCast(meta['cast']) | |
| # # # remove unsupported InfoLabels | |
| meta.pop('cast', None) # replaced by item.setCast(i['cast']) | |
| meta.pop('number_of_seasons', None) | |
| meta.pop('imdb_id', None) | |
| meta.pop('tvdb_id', None) | |
| meta.pop('tmdb_id', None) | |
| ## Quality Video Stream from source.append quality - items[i]['quality'] | |
| video_streaminfo = {} | |
| if "4k" in items[i]['quality'].lower(): | |
| video_streaminfo.update({'width': 3840, 'height': 2160}) | |
| elif "1080p" in items[i]['quality'].lower(): | |
| video_streaminfo.update({'width': 1920, 'height': 1080}) | |
| elif "hd" in items[i]['quality'].lower() or "720p" in items[i]['quality'].lower(): | |
| video_streaminfo.update({'width': 1280,'height': 720}) | |
| else: | |
| # video_streaminfo.update({"width": 720, "height": 576}) | |
| video_streaminfo.update({}) | |
| ## Codec for Video Stream from extra info - items[i]['info'] | |
| if 'hevc' in items[i]['label'].lower(): | |
| video_streaminfo.update({'codec': 'hevc'}) | |
| elif '265' in items[i]['label'].lower(): | |
| video_streaminfo.update({'codec': 'h265'}) | |
| elif 'mkv' in items[i]['label'].lower(): | |
| video_streaminfo.update({'codec': 'mkv'}) | |
| elif 'mp4' in items[i]['label'].lower(): | |
| video_streaminfo.update({'codec': 'mp4'}) | |
| else: | |
| # video_streaminfo.update({'codec': 'h264'}) | |
| video_streaminfo.update({'codec': ''}) | |
| ## Quality & Channels Audio Stream from extra info - items[i]['info'] | |
| audio_streaminfo = {} | |
| if 'dts' in items[i]['label'].lower(): | |
| audio_streaminfo.update({'codec': 'dts'}) | |
| elif 'plus' in items[i]['label'].lower() or 'e-ac3' in items[i]['label'].lower(): | |
| audio_streaminfo.update({'codec': 'eac3'}) | |
| elif 'dolby' in items[i]['label'].lower() or 'ac3' in items[i]['label'].lower(): | |
| audio_streaminfo.update({'codec': 'ac3'}) | |
| else: | |
| # audio_streaminfo.update({'codec': 'aac'}) | |
| audio_streaminfo.update({'codec': ''}) | |
| ## Channel update ## | |
| if '7.1' in items[i].get('info','').lower(): | |
| audio_streaminfo.update({'channels': 8}) | |
| elif '5.1' in items[i].get('info','').lower(): | |
| audio_streaminfo.update({'channels': 6}) | |
| else: | |
| # audio_streaminfo.update({'channels': 2}) | |
| audio_streaminfo.update({'channels': ''}) | |
| if int(getKodiVersion()) <= 19: | |
| item.setInfo(type='Video', infoLabels=infolable) | |
| item.addStreamInfo('video', video_streaminfo) | |
| item.addStreamInfo('audio', audio_streaminfo) | |
| else: | |
| info_tag = ListItemInfoTag(item, 'video') | |
| info_tag.set_info(infolable) | |
| stream_details = { | |
| 'video': [video_streaminfo], | |
| 'audio': [audio_streaminfo]} | |
| info_tag.set_stream_details(stream_details) | |
| # info_tag.set_cast(aActors) | |
| control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False) | |
| except: | |
| pass | |
| control.content(syshandle, 'videos') | |
| control.plugincategory(syshandle, control.addonVersion) | |
| control.endofdirectory(syshandle, cacheToDisc=True) | |
| def playItem(self, title, source): | |
| isDebug = False | |
| if isDebug: log_utils.log('start playItem', log_utils.LOGWARNING) | |
| try: | |
| meta = control.window.getProperty(self.metaProperty) | |
| meta = json.loads(meta) | |
| header = control.addonInfo('name') | |
| # control.idle() #ok | |
| progressDialog = control.progressDialog if control.getSetting('progress.dialog') == '0' else control.progressDialogBG | |
| progressDialog.create(header, '') | |
| progressDialog.update(0) | |
| item = json.loads(source)[0] | |
| #if isDebug: log_utils.log('playItem 237', log_utils.LOGWARNING) | |
| if item['source'] is None: raise Exception() | |
| future = self.executor.submit(self.sourcesResolve, item) | |
| waiting_time = 30 | |
| while waiting_time > 0: | |
| try: | |
| if control.abortRequested: return sys.exit() | |
| if progressDialog.iscanceled(): return progressDialog.close() | |
| except: | |
| pass | |
| if future.done(): break | |
| control.sleep(1) | |
| waiting_time = waiting_time - 1 | |
| progressDialog.update(int(100 - 100. / 30 * waiting_time), str(item['label'])) | |
| #if isDebug: log_utils.log('playItem 252', log_utils.LOGWARNING) | |
| if control.condVisibility('Window.IsActive(virtualkeyboard)') or \ | |
| control.condVisibility('Window.IsActive(yesnoDialog)'): | |
| # or control.condVisibility('Window.IsActive(PopupRecapInfoWindow)'): | |
| waiting_time = waiting_time + 1 # dont count down while dialog is presented | |
| if future.done(): break | |
| try: progressDialog.close() | |
| except: pass | |
| if isDebug: log_utils.log('playItem 261', log_utils.LOGWARNING) | |
| control.execute('Dialog.Close(virtualkeyboard)') | |
| control.execute('Dialog.Close(yesnoDialog)') | |
| if isDebug: log_utils.log('playItem url: %s' % self.url, log_utils.LOGWARNING) | |
| if self.url is None: | |
| #self.errorForSources() | |
| return | |
| from resources.lib.player import player | |
| player().run(title, self.url, meta) | |
| return self.url | |
| except Exception as e: | |
| log_utils.log('Error %s' % str(e), log_utils.LOGERROR) | |
| def getSources(self, title, year, imdb, season, episode, originaltitle, premiered, quality='HD', timeout=30): | |
| #TODO | |
| # self._getHostDict() | |
| control.idle() #ok | |
| progressDialog = control.progressDialog if control.getSetting('progress.dialog') == '0' else control.progressDialogBG | |
| progressDialog.create(control.addonInfo('name'), '') | |
| progressDialog.update(0) | |
| progressDialog.update(0, "Quellen werden vorbereitet") | |
| sourceDict = self.sourceDict | |
| sourceDict = [(i[0], i[1], i[1].priority) for i in sourceDict] | |
| random.shuffle(sourceDict) | |
| sourceDict = sorted(sourceDict, key=lambda i: i[2]) | |
| content = 'movies' if season == 0 or season == '' or season is None else 'shows' | |
| aliases, localtitle = utils.getAliases(imdb, content) | |
| if localtitle and title != localtitle and originaltitle != localtitle: | |
| if not title in aliases: aliases.append(title) | |
| title = localtitle | |
| for i in self.aliases: | |
| if not i in aliases: | |
| aliases.append(i) | |
| titles = utils.get_titles_for_search(title, originaltitle, aliases) | |
| futures = {self.executor.submit(self._getSource, titles, year, season, episode, imdb, provider[0], provider[1]): provider[0] for provider in sourceDict} | |
| provider_names = {provider[0].upper() for provider in sourceDict} | |
| string4 = "Total" | |
| try: timeout = int(control.getSetting('scrapers.timeout')) | |
| except: pass | |
| quality = control.getSetting('hosts.quality') | |
| if quality == '': quality = '0' | |
| source_4k = 0 | |
| source_1080 = 0 | |
| source_720 = 0 | |
| source_sd = 0 | |
| total = d_total = 0 | |
| total_format = '[COLOR %s][B]%s[/B][/COLOR]' | |
| pdiag_format = ' 4K: %s | 1080p: %s | 720p: %s | SD: %s | %s: %s '.split('|') | |
| for i in range(0, 4 * timeout): | |
| try: | |
| if control.abortRequested: return sys.exit() | |
| try: | |
| if progressDialog.iscanceled(): break | |
| except: | |
| pass | |
| if len(self.sources) > 0: | |
| if quality in ['0']: | |
| source_4k = len([e for e in self.sources if e['quality'] == '4K']) | |
| source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p']]) | |
| source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD']]) | |
| source_sd = len([e for e in self.sources if e['quality'] not in ['4K','1440p','1080p','720p','HD']]) | |
| elif quality in ['1']: | |
| source_1080 = len([e for e in self.sources if e['quality'] in ['1440p','1080p']]) | |
| source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD']]) | |
| source_sd = len([e for e in self.sources if e['quality'] not in ['4K','1440p','1080p','720p','HD']]) | |
| elif quality in ['2']: | |
| source_1080 = len([e for e in self.sources if e['quality'] in ['1080p']]) | |
| source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD']]) | |
| source_sd = len([e for e in self.sources if e['quality'] not in ['4K','1440p','1080p','720p','HD']]) | |
| elif quality in ['3']: | |
| source_720 = len([e for e in self.sources if e['quality'] in ['720p','HD']]) | |
| source_sd = len([e for e in self.sources if e['quality'] not in ['4K','1440p','1080p','720p','HD']]) | |
| else: | |
| source_sd = len([e for e in self.sources if e['quality'] not in ['4K','1440p','1080p','720p','HD']]) | |
| total = source_4k + source_1080 + source_720 + source_sd | |
| source_4k_label = total_format % ('red', source_4k) if source_4k == 0 else total_format % ('lime', source_4k) | |
| source_1080_label = total_format % ('red', source_1080) if source_1080 == 0 else total_format % ('lime', source_1080) | |
| source_720_label = total_format % ('red', source_720) if source_720 == 0 else total_format % ('lime', source_720) | |
| source_sd_label = total_format % ('red', source_sd) if source_sd == 0 else total_format % ('lime', source_sd) | |
| source_total_label = total_format % ('red', total) if total == 0 else total_format % ('lime', total) | |
| try: | |
| info = [name.upper() for future, name in futures.items() if not future.done()] | |
| percent = int(100 * float(i) / (2 * timeout) + 1) | |
| if quality in ['0']: | |
| line1 = '|'.join(pdiag_format) % (source_4k_label, source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label) | |
| elif quality in ['1']: | |
| line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label) | |
| elif quality in ['2']: | |
| line1 = '|'.join(pdiag_format[1:]) % (source_1080_label, source_720_label, source_sd_label, str(string4), source_total_label) | |
| elif quality in ['3']: | |
| line1 = '|'.join(pdiag_format[2:]) % (source_720_label, source_sd_label, str(string4), source_total_label) | |
| else: | |
| line1 = '|'.join(pdiag_format[3:]) % (source_sd_label, str(string4), source_total_label) | |
| if (i / 2) < timeout: | |
| string = "Verbleibende Indexseiten: %s" | |
| else: | |
| string = 'Waiting for: %s' | |
| if len(info) > 6: line = line1 + string % (str(len(info))) | |
| elif len(info) > 1: line = line1 + string % (', '.join(info)) | |
| elif len(info) == 1: line = line1 + string % (''.join(info)) | |
| else: line = line1 + 'Suche beendet!' | |
| progressDialog.update(max(1, percent), line) | |
| if len(info) == 0: break | |
| except Exception as e: | |
| log_utils.log('Exception Raised: %s' % str(e), log_utils.LOGERROR) | |
| control.sleep(1) | |
| except: | |
| pass | |
| time.sleep(1) | |
| try: progressDialog.close() | |
| except: pass | |
| self.sourcesFilter() | |
| return self.sources | |
| def _getSource(self, titles, year, season, episode, imdb, source, call): | |
| try: | |
| sources = call.run(titles, year, season, episode, imdb) # kasi self.hostDict | |
| if sources is None or sources == []: raise Exception() | |
| sources = [json.loads(t) for t in set(json.dumps(d, sort_keys=True) for d in sources)] | |
| for i in sources: | |
| i.update({'provider': source}) | |
| if not 'priority' in i: i.update({'priority': 100}) | |
| if not 'prioHoster' in i: i.update({'prioHoster': 100}) | |
| self.sources.extend(sources) | |
| except: | |
| pass | |
| def sourcesFilter(self): | |
| # hostblockDict = utils.getHostDict() | |
| # self.sources = [i for i in self.sources if i['source'].split('.')[0] not in str(hostblockDict)] # exclude hosters (list) | |
| quality = control.getSetting('hosts.quality') | |
| if quality == '': quality = '0' | |
| random.shuffle(self.sources) | |
| self.sources = sorted(self.sources, key=lambda k: k['prioHoster'], reverse=False) | |
| for i in range(len(self.sources)): | |
| q = self.sources[i]['quality'] | |
| if q.lower() == 'hd': self.sources[i].update({'quality': '720p'}) | |
| filter = [] | |
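| # quality cascade: setting '0' keeps 4K and below, '1' adds from 1440p down, | |
| # '2' from 1080p down, '3' from 720p down; anything without a known HD label | |
| # is treated as SD and always kept | |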
| if quality in ['0']: filter += [i for i in self.sources if i['quality'] == '4K'] | |
| if quality in ['0', '1']: filter += [i for i in self.sources if i['quality'] == '1440p'] | |
| if quality in ['0', '1', '2']: filter += [i for i in self.sources if i['quality'] == '1080p'] | |
| if quality in ['0', '1', '2', '3']: filter += [i for i in self.sources if i['quality'] == '720p'] | |
| #filter += [i for i in self.sources if i['quality'] in ['SD', 'SCR', 'CAM']] | |
| filter += [i for i in self.sources if i['quality'] not in ['4K', '1440p', '1080p', '720p']]  # '4K' must match case, otherwise 4K links were duplicated into the SD bucket | |
| self.sources = filter | |
| if control.getSetting('hosts.sort.provider') == 'true': | |
| self.sources = sorted(self.sources, key=lambda k: k['provider']) | |
| if control.getSetting('hosts.sort.priority') == 'true' and self.mediatype == 'tvshow': self.sources = sorted(self.sources, key=lambda k: k['priority'], reverse=False) | |
| if str(control.getSetting('hosts.limit')) == 'true': | |
| self.sources = self.sources[:int(control.getSetting('hosts.limit.num'))] | |
| else: | |
| self.sources = self.sources[:100] | |
| for i in range(len(self.sources)): | |
| p = self.sources[i]['provider'] | |
| q = self.sources[i]['quality'] | |
| s = self.sources[i]['source'] | |
| ## s = s.rsplit('.', 1)[0] | |
| l = self.sources[i]['language'] | |
| try: f = (' | '.join(['[I]%s [/I]' % info.strip() for info in self.sources[i]['info'].split('|')])) | |
| except: f = '' | |
| label = '%02d | [B]%s[/B] | ' % (int(i + 1), p) | |
| if q in ['4K', '1440p', '1080p', '720p']: label += '%s | [B][I]%s [/I][/B] | %s' % (s, q, f) | |
| elif q == 'SD': label += '%s | %s' % (s, f) | |
| else: label += '%s | %s | [I]%s [/I]' % (s, f, q) | |
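| # the replace/re.sub chain below strips fields left empty by missing info or | |
| # quality, e.g. '01 | [B]PROV[/B] | HOST | ' collapses to '01 | [B]PROV[/B] | HOST' | |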
| label = label.replace('| 0 |', '|').replace(' | [I]0 [/I]', '') | |
| label = re.sub(r'\[I\]\s+\[/I\]', ' ', label) | |
| label = re.sub(r'\|\s+\|', '|', label) | |
| label = re.sub(r'\|(?:\s+|)$', '', label) | |
| self.sources[i]['label'] = label.upper() | |
| # ## EMBY shown as premium link ## | |
| # if self.sources[i]['provider']=="emby" or self.sources[i]['provider']=="amazon" or self.sources[i]['provider']=="netflix" or self.sources[i]['provider']=="maxdome": | |
| # prem_identify = 'blue' | |
| # self.sources[i]['label'] = ('[COLOR %s]' % (prem_identify)) + label.upper() + '[/COLOR]' | |
| self.sources = [i for i in self.sources if 'label' in i] | |
| return self.sources | |
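| # sourcesResolve below consumes item dicts as built by the scrapers plus | |
| # _getSource, e.g. (illustrative values): {'provider': 'streamcloud', | |
| # 'source': 'voe.sx', 'quality': '720p', 'language': 'de', 'url': '...', | |
| # 'direct': True, 'prioHoster': 50}; 'direct' skips the resolveurl round trip | |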
| def sourcesResolve(self, item, info=False): | |
| try: | |
| self.url = None | |
| url = item['url'] | |
| direct = item['direct'] | |
| local = item.get('local', False) | |
| provider = item['provider'] | |
| call = [i[1] for i in self.sourceDict if i[0] == provider][0] | |
| url = call.resolve(url) | |
| if not direct == True: | |
| try: | |
| hmf = resolver.HostedMediaFile(url=url, include_disabled=True, include_universal=False) | |
| if hmf.valid_url(): | |
| url = hmf.resolve() | |
| if url == False or url == None or url == '': url = None # raise Exception() | |
| except: | |
| url = None | |
| if url == None or (not '://' in str(url) and not local): | |
| log_utils.log('No video link found: provider %s / source %s / url %s' % (item['provider'], item['source'], str(item['url'])), log_utils.LOGERROR) | |
| raise Exception() | |
| # if not utils.test_stream(url): | |
| # log_utils.log('URL Test Error: %s' % url, log_utils.LOGERROR) | |
| # raise Exception() | |
| # url = utils.m3u8_check(url) | |
| if url: | |
| self.url = url | |
| return url | |
| else: | |
| raise Exception() | |
| except: | |
| if info: self.errorForSources() | |
| return | |
| def sourcesDialog(self, items): | |
| labels = [i['label'] for i in items] | |
| select = control.selectDialog(labels) | |
| if select == -1: return 'close://' | |
| next = [y for x, y in enumerate(items) if x > select]  # strictly after the selection, otherwise the chosen item was queued twice | |
| prev = [y for x,y in enumerate(items) if x < select][::-1] | |
| items = [items[select]] | |
| items = [i for i in items+next+prev][:40] | |
| header = control.addonInfo('name') | |
| header2 = header.upper() | |
| progressDialog = control.progressDialog if control.getSetting('progress.dialog') == '0' else control.progressDialogBG | |
| progressDialog.create(header, '') | |
| progressDialog.update(0) | |
| block = None | |
| try: | |
| for i in range(len(items)): | |
| try: | |
| if items[i]['source'] == block: raise Exception() | |
| future = self.executor.submit(self.sourcesResolve, items[i]) | |
| try: | |
| if progressDialog.iscanceled(): break | |
| progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label'])) | |
| except: | |
| progressDialog.update(int((100 / float(len(items))) * i), str(header2) + str(items[i]['label'])) | |
| waiting_time = 30 | |
| while waiting_time > 0: | |
| try: | |
| if control.abortRequested: return sys.exit() #xbmc.Monitor().abortRequested() | |
| if progressDialog.iscanceled(): return progressDialog.close() | |
| except: | |
| pass | |
| if future.done(): break | |
| control.sleep(1) | |
| waiting_time = waiting_time - 1 | |
| if control.condVisibility('Window.IsActive(virtualkeyboard)') or \ | |
| control.condVisibility('Window.IsActive(yesnoDialog)') or \ | |
| control.condVisibility('Window.IsActive(ProgressDialog)'): | |
| waiting_time = waiting_time + 1 # don't count down while a dialog is presented ## control.condVisibility('Window.IsActive(PopupRecapInfoWindow)') or \ | |
| if not future.done(): block = items[i]['source'] | |
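| # resolver budget: ~30 ticks of control.sleep(1), paused while a keyboard or | |
| # dialog is open; a host that never finishes is remembered in 'block' so | |
| # further items from the same host are skipped within this dialog | |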
| if self.url == None: raise Exception() | |
| self.selectedSource = items[i]['label'] | |
| try: progressDialog.close() | |
| except: pass | |
| control.execute('Dialog.Close(virtualkeyboard)') | |
| control.execute('Dialog.Close(yesnoDialog)') | |
| return self.url | |
| except: | |
| pass | |
| try: progressDialog.close() | |
| except: pass | |
| except Exception as e: | |
| try: progressDialog.close() | |
| except: pass | |
| log_utils.log('Error %s' % str(e), log_utils.LOGINFO) | |
| def sourcesDirect(self, items): | |
| # TODO - OK | |
| # filter = [i for i in items if i['source'].lower() in self.hostcapDict and i['debrid'] == ''] | |
| # items = [i for i in items if not i in filter] | |
| # items = [i for i in items if ('autoplay' in i and i['autoplay'] == True) or not 'autoplay' in i] | |
| u = None | |
| header = control.addonInfo('name') | |
| header2 = header.upper() | |
| try: | |
| control.sleep(1) | |
| progressDialog = control.progressDialog if control.getSetting('progress.dialog') == '0' else control.progressDialogBG | |
| progressDialog.create(header, '') | |
| progressDialog.update(0) | |
| except: | |
| pass | |
| for i in range(len(items)): | |
| try: | |
| if progressDialog.iscanceled(): break | |
| progressDialog.update(int((100 / float(len(items))) * i), str(items[i]['label'])) | |
| except: | |
| progressDialog.update(int((100 / float(len(items))) * i), str(header2) + str(items[i]['label'])) | |
| try: | |
| if control.abortRequested: return sys.exit() | |
| url = self.sourcesResolve(items[i]) | |
| if u == None: u = url | |
| if not url == None: break | |
| except: | |
| pass | |
| try: progressDialog.close() | |
| except: pass | |
| return u | |
| def errorForSources(self): | |
| control.infoDialog("Keine Streams verfügbar oder ausgewählt", sound=False, icon='INFO') | |
| def getTitle(self, title): | |
| title = utils.normalize(title) | |
| return title | |
| def getConstants(self): | |
| self.itemsProperty = '%s.container.items' % control.Addon.getAddonInfo('id') | |
| self.metaProperty = '%s.container.meta' % control.Addon.getAddonInfo('id') | |
| from scrapers import sources | |
| self.sourceDict = sources() |
| # streamcloud | |
| # 2023-08-01 | |
| # edit 2024-12-14 | |
| from resources.lib.utils import isBlockedHoster | |
| from scrapers.modules.tools import cParser | |
| from resources.lib.requestHandler import cRequestHandler | |
| from resources.lib.control import getSetting  # cleantitle, dom_parser and setSetting were imported but unused | |
| SITE_IDENTIFIER = 'streamcloud' | |
| SITE_DOMAIN = 'streamcloud.plus' # https://topstreamfilm.live/ https://meinecloud.click/movie/tt1630029 | |
| SITE_NAME = SITE_IDENTIFIER.upper() | |
| class source: | |
| def __init__(self): | |
| self.priority = 1 | |
| self.language = ['de'] | |
| self.domain = getSetting('provider.' + SITE_IDENTIFIER + '.domain', SITE_DOMAIN) | |
| self.base_link = 'https://' + self.domain | |
| self.search_link = self.base_link + '/index.php?story=%s&do=search&subaction=search' | |
| self.sources = [] | |
| def run(self, titles, year, season=0, episode=0, imdb=''): | |
| try: | |
| if season == 0: | |
| ## https://meinecloud.click/movie/tt1477834 | |
| oRequest = cRequestHandler('https://meinecloud.click/movie/%s' % imdb, caching=True) | |
| sHtmlContent = oRequest.request() | |
| isMatch, aResult = cParser.parse(sHtmlContent, 'data-link="([^"]+)') | |
| for sUrl in aResult: | |
| if sUrl.startswith('/'): sUrl = 'https:' + sUrl | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: | |
| self.sources.append({'source': hoster, 'quality': '720p', 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| else: | |
| oRequest = cRequestHandler(self.search_link % imdb, caching=True) | |
| sHtmlContent = oRequest.request() | |
| pattern = 'class="thumb".*?title="([^"]+).*?href="([^"]+).*?_year">([^<]+)' | |
| isMatch, aResult = cParser.parse(sHtmlContent, pattern) | |
| if not isMatch: return self.sources | |
| sName, sUrl, sYear = aResult[0] | |
| oRequest = cRequestHandler(sUrl, caching=True) | |
| sHtmlContent = oRequest.request() | |
| pattern = r'%sx%s\s.*?/>' % (str(season), str(episode)) | |
| isMatch, sLinkContainer = cParser.parseSingleResult(sHtmlContent, pattern) | |
| pattern = 'href="([^"]+)' | |
| isMatch, aResult = cParser.parse(sLinkContainer, pattern) | |
| if not isMatch: return self.sources | |
| for sUrl in aResult: | |
| if sUrl.startswith('/'): sUrl = 'https:' + sUrl | |
| isBlocked, hoster, url, prioHoster = isBlockedHoster(sUrl) | |
| if isBlocked: continue | |
| if url: | |
| self.sources.append({'source': hoster, 'quality': '720p', 'language': 'de', 'url': url, 'direct': True, 'prioHoster': prioHoster}) | |
| return self.sources | |
| except: | |
| return self.sources | |
| def resolve(self, url): | |
| return url |
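| # minimal usage sketch (assumed calling convention, mirroring _getSource; the | |
| # title and imdb id are illustrative): | |
| # s = source() | |
| # links = s.run(['Heat'], '1995', imdb='tt0113277') | |
| # each entry: {'source': <hoster>, 'quality': '720p', 'language': 'de', | |
| # 'url': <resolved url>, 'direct': True, 'prioHoster': <priority>} | |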
| #2022-11-05 | |
| # edit 2023-02-23 | |
| from __future__ import print_function | |
| import json, re | |
| from resources.lib.requestHandler import cRequestHandler | |
| from resources.lib.control import getSetting | |
| class cTMDB: | |
| TMDB_GENRES = {12: 'Abenteuer', 14: 'Fantasy', 16: 'Animation', 18: 'Drama', 27: 'Horror', 28: 'Action', 35: 'Komödie', 36: 'Historie', 37: 'Western', 53: 'Thriller', 80: 'Krimi', 99: 'Dokumentarfilm', 878: 'Science Fiction', 9648: 'Mystery', 10402: 'Musik', 10749: 'Liebesfilm', 10751: 'Familie', 10752: 'Kriegsfilm', 10759: 'Action & Adventure', 10762: 'Kids', 10763: 'News', 10764: 'Reality', 10765: 'Sci-Fi & Fantasy', 10766: 'Soap', 10767: 'Talk', 10768: 'War & Politics', 10770: 'TV-Film'} | |
| URL = 'https://api.themoviedb.org/3/' | |
| URL_TRAILER = 'plugin://plugin.video.youtube/play/?video_id=%s' | |
| def __init__(self, api_key='', lang='de'): | |
| self.api_key = 'be7e192d9ff45609c57344a5c561be1d' # getSetting('api.tmdb').strip() | |
| self.lang = lang | |
| self.poster = 'https://image.tmdb.org/t/p/%s' % 'w342' #cConfig().getSetting('poster_tmdb') | |
| self.fanart = 'https://image.tmdb.org/t/p/%s' % 'w1280'#cConfig().getSetting('backdrop_tmdb') | |
| self.mediaType = '' | |
| self.searchDoku = getSetting('search.doku') or 'false' | |
| def search_movie_name(self, name, year='', page=1, advanced='false'): | |
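| # match strategy: on a unique TMDB hit take it; otherwise prefer an exact | |
| # case-insensitive title match, then a result released within a year of the | |
| # requested year, and finally fall back to results[0] | |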
| name = re.sub(" +", " ", name) | |
| if year: | |
| term = name + '&year=' + year | |
| else: | |
| term = name | |
| meta = self._call('search/movie', 'query=' + term + '&page=' + str(page)) | |
| if 'errors' not in meta and 'status_code' not in meta: | |
| if 'total_results' in meta and meta['total_results'] == 0 and year: | |
| meta = self.search_movie_name(name, '') | |
| if 'total_results' in meta and meta['total_results'] != 0: | |
| movie = '' | |
| if meta['total_results'] == 1: | |
| movie = meta['results'][0] | |
| else: | |
| for searchMovie in meta['results']: | |
| if searchMovie['genre_ids']: | |
| if self.searchDoku == 'false' and 99 in searchMovie['genre_ids']: continue | |
| if searchMovie['title'].lower() == name.lower(): | |
| movie = searchMovie | |
| break | |
| if not movie: | |
| for searchMovie in meta['results']: | |
| if searchMovie['genre_ids']: | |
| if self.searchDoku == 'false' and 99 in searchMovie['genre_ids']: continue | |
| if year: | |
| if 'release_date' in searchMovie and searchMovie['release_date']: | |
| release_date = searchMovie['release_date'] | |
| yy = release_date[:4] | |
| if int(year) - int(yy) > 1: | |
| continue | |
| movie = searchMovie | |
| break | |
| if not movie: | |
| movie = meta['results'][0] | |
| if advanced == 'true': | |
| tmdb_id = movie['id'] | |
| meta = self.search_movie_id(tmdb_id) | |
| else: | |
| meta = movie | |
| else: | |
| meta = {} | |
| return meta | |
| def search_movie_id(self, movie_id, append_to_response='append_to_response=alternative_titles,credits'): | |
| result = self._call('movie/' + str(movie_id), append_to_response) | |
| result['tmdb_id'] = movie_id | |
| return result | |
| # def search_movie_imdb_id(self, movie_id, append_to_response='append_to_response=trailers,credits'): | |
| # result = self._call('movie/' + str(movie_id), append_to_response) | |
| # result['tmdb_id'] = movie_id | |
| # return result | |
| def search_tvshow_name(self, name, year='', page=1, genre='', advanced='false'): | |
| name = name.lower() | |
| if '- staffel' in name: | |
| name = re.sub(r'\s-\s\wtaffel[^>]([1-9\-]+)', '', name) | |
| elif 'staffel' in name: | |
| name = re.sub(r'\s\wtaffel[^>]([1-9\-]+)', '', name) | |
| if year: | |
| term = name + '&year=' + year | |
| else: | |
| term = name | |
| meta = self._call('search/tv', 'query=' + term + '&page=' + str(page)) | |
| if 'errors' not in meta and 'status_code' not in meta: | |
| if 'total_results' in meta and meta['total_results'] == 0 and year: | |
| meta = self.search_tvshow_name(name, '') | |
| if 'total_results' in meta and meta['total_results'] != 0: | |
| movie = '' | |
| if meta['total_results'] == 1: | |
| movie = meta['results'][0] | |
| else: | |
| for searchMovie in meta['results']: | |
| if genre == '' or genre in searchMovie['genre_ids']: | |
| movieName = searchMovie['name'] | |
| if movieName.lower() == name.lower(): | |
| movie = searchMovie | |
| break | |
| if not movie: | |
| for searchMovie in meta['results']: | |
| if genre and genre in searchMovie['genre_ids']: | |
| if year: | |
| if 'first_air_date' in searchMovie and searchMovie['first_air_date']:  # tv search results carry first_air_date, not release_date | |
| release_date = searchMovie['first_air_date'] | |
| yy = release_date[:4] | |
| if int(year) - int(yy) > 1: | |
| continue | |
| movie = searchMovie | |
| break | |
| if not movie: | |
| movie = meta['results'][0] | |
| if advanced == 'true': | |
| tmdb_id = movie['id'] | |
| meta = self.search_tvshow_id(tmdb_id) | |
| else: | |
| meta = movie | |
| else: | |
| meta = {} | |
| return meta | |
| def search_tvshow_id(self, show_id, append_to_response='append_to_response=external_ids,alternative_titles,credits'): | |
| result = self._call('tv/' + str(show_id), append_to_response) | |
| result['tmdb_id'] = show_id | |
| return result | |
| def get_meta(self, media_type, name, imdb_id='', tmdb_id='', year='', season='', episode='', advanced='false'): | |
| name = re.sub(" +", " ", name) | |
| meta = {} | |
| if media_type == 'movie': | |
| if tmdb_id: | |
| meta = self.search_movie_id(tmdb_id) | |
| elif name: | |
| meta = self.search_movie_name(name, year, advanced=advanced) | |
| elif media_type == 'tvshow': | |
| if tmdb_id: | |
| meta = self.search_tvshow_id(tmdb_id) | |
| elif name: | |
| meta = self.search_tvshow_name(name, year, advanced=advanced) | |
| if meta and 'id' in meta: | |
| meta.update({'mediatype': media_type}) | |
| meta = self._formatSuper(meta, name) | |
| return meta | |
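| # e.g. get_meta('movie', 'Heat', year='1995') returns the dict built by | |
| # _formatSuper below: tmdb_id, title, plot, poster/fanart urls, genre tuple | |
| # etc.; title and year here are illustrative | |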
| def getUrl(self, url, page=1, term=''): | |
| try: | |
| if term: | |
| term = term + '&page=' + str(page) | |
| else: | |
| term = 'page=' + str(page) | |
| result = self._call(url, term) | |
| except: | |
| return False | |
| return result | |
| def _call(self, action, append_to_response=''): | |
| url = '%s%s?language=%s&api_key=%s&include_adult=false' % (self.URL, action, self.lang, self.api_key) # &region=DE&vote_count.gte=10 | |
| if append_to_response: | |
| url += '&%s' % append_to_response | |
| # if 'person' in url: | |
| # url = url.replace('&page=', '') | |
| oRequestHandler = cRequestHandler(url, ignoreErrors=True) | |
| name = oRequestHandler.request() | |
| data = json.loads(name) | |
| if 'status_code' in data and data['status_code'] == 34: | |
| return {} | |
| return data | |
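| # e.g. _call('search/movie', 'query=Heat&page=1') requests | |
| # https://api.themoviedb.org/3/search/movie?language=de&api_key=<key>&include_adult=false&query=Heat&page=1 | |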
| def getGenresFromIDs(self, genresID): | |
| sGenres = [] | |
| for gid in genresID: | |
| genre = self.TMDB_GENRES.get(gid) | |
| if genre: | |
| sGenres.append(genre) | |
| return sGenres | |
| def getLanguage(self, Language): | |
| iso_639 = {'en': 'English', 'de': 'German', 'fr': 'French', 'it': 'Italian', 'nl': 'Nederlands', 'sv': 'Swedish', 'cs': 'Czech', 'da': 'Danish', 'fi': 'Finnish', 'pl': 'Polish', 'es': 'Spanish', 'el': 'Greek', 'tr': 'Turkish', 'uk': 'Ukrainian', 'ru': 'Russian', 'kn': 'Kannada', 'ga': 'Irish', 'hr': 'Croatian', 'hu': 'Hungarian', 'ja': 'Japanese', 'no': 'Norwegian', 'id': 'Indonesian', 'ko': 'Korean', 'pt': 'Portuguese', 'lv': 'Latvian', 'lt': 'Lithuanian', 'ro': 'Romanian', 'sk': 'Slovak', 'sl': 'Slovenian', 'sq': 'Albanian', 'sr': 'Serbian', 'th': 'Thai', 'vi': 'Vietnamese', 'bg': 'Bulgarian', 'fa': 'Persian', 'hy': 'Armenian', 'ka': 'Georgian', 'ar': 'Arabic', 'af': 'Afrikaans', 'bs': 'Bosnian', 'zh': 'Chinese', 'cn': 'Chinese', 'hi': 'Hindi'} | |
| if Language in iso_639: | |
| return iso_639[Language] | |
| else: | |
| return Language | |
| def get_meta_episode(self, media_type, name, tmdb_id='', season='', episode='', advanced='false'): | |
| meta = {} | |
| if media_type == 'episode' and tmdb_id and season and episode: | |
| url = '%stv/%s/season/%s/episode/%s?api_key=%s&language=de&include_adult=false' % (self.URL, tmdb_id, season, episode, self.api_key) | |
| if advanced == 'true': url = url + '&append_to_response=external_ids,videos,credits' | |
| Data = cRequestHandler(url, ignoreErrors=True).request() | |
| if Data: | |
| meta = json.loads(Data) | |
| #meta.update({'episode': episode}) | |
| meta = self._format_episodes(meta, name) | |
| return meta | |
| else: | |
| return {} | |
| def get_meta_seasons(self, tmdb_id='', season='', advanced='false'): | |
| meta = {} | |
| if tmdb_id and season: | |
| url = '%stv/%s/season/%s?api_key=%s&language=de&include_adult=false' % (self.URL, tmdb_id, season, self.api_key) | |
| Data = cRequestHandler(url, ignoreErrors=True).request() | |
| if Data: | |
| meta = json.loads(Data) | |
| if 'id' in meta: | |
| _meta = {} | |
| if 'name' in meta and meta['name']: | |
| _meta['name'] = meta['name'] | |
| if 'poster_path' in meta and meta['poster_path']: | |
| _meta['poster'] = self.poster + str(meta['poster_path']) | |
| if 'air_date' in meta and meta['air_date']: | |
| _meta['premiered'] = meta['air_date'] | |
| if 'episodes' in meta and meta['episodes']: | |
| _meta['number_of_episodes'] = len(meta['episodes']) | |
| _meta['episodes'] = meta['episodes'] | |
| if 'season_number' in meta and meta['season_number']: | |
| _meta['season'] = meta['season_number'] | |
| if 'overview' in meta: | |
| _meta['plot'] = meta['overview'] | |
| return _meta | |
| else: | |
| return {} | |
| def _format_episodes(self, meta, name): | |
| _meta = {} | |
| if 'air_date' in meta: | |
| #_meta['aired'] = meta['air_date'] | |
| _meta['premiered'] = meta['air_date'] | |
| if 'episode_number' in meta: | |
| _meta['episode'] = meta['episode_number'] | |
| if 'name' in meta: | |
| _meta['title'] = meta['name'] | |
| if 'overview' in meta: | |
| _meta['plot'] = meta['overview'] | |
| if 'production_code' in meta: | |
| _meta['code'] = str(meta['production_code']) | |
| if 'season_number' in meta: | |
| _meta['season'] = meta['season_number'] | |
| if 'still_path' in meta and meta['still_path'] != None: | |
| _meta['cover_url'] = self.poster + meta['still_path'] | |
| _meta['poster'] = _meta['cover_url'] | |
| if 'vote_average' in meta: | |
| _meta['rating'] = meta['vote_average'] | |
| if 'vote_count' in meta: | |
| _meta['votes'] = meta['vote_count'] | |
| if 'crew' in meta: | |
| _meta['writer'] = '' | |
| _meta['director'] = '' | |
| _meta['cast'] = '' | |
| for crew in meta['crew']: | |
| if crew['department'] == 'Directing': | |
| if _meta['director'] != '': | |
| _meta['director'] += ' / ' | |
| _meta['director'] += '%s: %s' % (crew['job'], crew['name']) | |
| elif crew['department'] == 'Writing': | |
| if _meta['writer'] != '': | |
| _meta['writer'] += ' / ' | |
| _meta['writer'] += '%s: %s' % (crew['job'], crew['name']) | |
| if 'guest_stars' in meta: #TODO | |
| licast = [] | |
| for c in meta['guest_stars']: | |
| licast.append((c['name'], c['character'], self.poster + str(c['profile_path']))) | |
| _meta['cast'] = licast | |
| return _meta | |
| # def _format(self, meta, name): | |
| # _meta = {} | |
| # _meta['genre'] = '' | |
| # if 'id' in meta: | |
| # _meta['tmdb_id'] = meta['id'] | |
| # if 'backdrop_path' in meta and meta['backdrop_path']: | |
| # _meta['backdrop_url'] = self.fanart + str(meta['backdrop_path']) | |
| # if 'original_language' in meta and meta['original_language']: | |
| # _meta['country'] = self.getLanguage(meta['original_language']) | |
| # if 'original_title' in meta and meta['original_title']: | |
| # _meta['originaltitle'] = meta['original_title'] | |
| # elif 'original_name' in meta and meta['original_name']: | |
| # _meta['originaltitle'] = meta['original_name'] | |
| # if 'overview' in meta and meta['overview']: | |
| # _meta['plot'] = meta['overview'] | |
| # if 'poster_path' in meta and meta['poster_path']: | |
| # _meta['cover_url'] = self.poster + str(meta['poster_path']) | |
| # if 'release_date' in meta and meta['release_date']: | |
| # _meta['premiered'] = meta['release_date'] | |
| # elif 'first_air_date' in meta and meta['first_air_date']: | |
| # _meta['premiered'] = meta['first_air_date'] | |
| # if 'premiered' in _meta and _meta['premiered'] and len(_meta['premiered']) == 10: | |
| # _meta['year'] = int(_meta['premiered'][:4]) | |
| # if 'budget' in meta and meta['budget']: | |
| # _meta['budget'] = "{:,} $".format(meta['budget']) | |
| # if 'revenue' in meta and meta['revenue']: | |
| # _meta['revenue'] = "{:,} $".format(meta['revenue']) | |
| # if 'status' in meta and meta['status']: | |
| # _meta['status'] = meta['status'] | |
| # duration = 0 | |
| # if 'runtime' in meta and meta['runtime']: | |
| # duration = int(meta['runtime']) | |
| # elif 'episode_run_time' in meta and meta['episode_run_time']: | |
| # duration = int(meta['episode_run_time'][0]) | |
| # if duration < 300: | |
| # duration *= 60 | |
| # if duration > 1: | |
| # _meta['duration'] = duration | |
| # if 'tagline' in meta and meta['tagline']: | |
| # _meta['tagline'] = meta['tagline'] | |
| # if 'vote_average' in meta and meta['vote_average']: | |
| # _meta['rating'] = meta['vote_average'] | |
| # if 'vote_count' in meta and meta['vote_count']: | |
| # _meta['votes'] = meta['vote_count'] | |
| # if 'genres' in meta and meta['genres']: | |
| # for genre in meta['genres']: | |
| # if _meta['genre'] == '': | |
| # _meta['genre'] += genre['name'] | |
| # else: | |
| # _meta['genre'] += ' / ' + genre['name'] | |
| # elif 'genre_ids' in meta and meta['genre_ids']: | |
| # genres = self.getGenresFromIDs(meta['genre_ids']) | |
| # for genre in genres: | |
| # if _meta['genre'] == '': | |
| # _meta['genre'] += genre | |
| # else: | |
| # _meta['genre'] += ' / ' + genre | |
| # if 'production_companies' in meta and meta['production_companies']: | |
| # _meta['studio'] = '' | |
| # for studio in meta['production_companies']: | |
| # if _meta['studio'] == '': | |
| # _meta['studio'] += studio['name'] | |
| # else: | |
| # _meta['studio'] += ' / ' + studio['name'] | |
| # if 'credits' in meta and meta['credits']: | |
| # strmeta = str(meta['credits']) | |
| # listCredits = eval(strmeta) | |
| # casts = listCredits['cast'] | |
| # crews = [] | |
| # if len(casts) > 0: | |
| # licast = [] | |
| # if 'crew' in listCredits: | |
| # crews = listCredits['crew'] | |
| # if len(crews) > 0: | |
| # _meta['credits'] = "{'cast': " + str(casts) + ", 'crew': " + str(crews) + "}" | |
| # for cast in casts: | |
| # licast.append((cast['name'], cast['character'], self.poster + str(cast['profile_path']), str(cast['id']))) | |
| # _meta['cast'] = licast | |
| # else: | |
| # _meta['credits'] = "{'cast': " + str(casts) + '}' | |
| # if len(crews) > 0: | |
| # _meta['writer'] = '' | |
| # for crew in crews: | |
| # if crew['job'] == 'Director': | |
| # _meta['director'] = crew['name'] | |
| # elif crew['department'] == 'Writing': | |
| # if _meta['writer'] != '': | |
| # _meta['writer'] += ' / ' | |
| # _meta['writer'] += '%s: %s' % (crew['job'], crew['name']) | |
| # elif crew['department'] == 'Production' and 'Producer' in crew['job']: | |
| # if _meta['writer'] != '': | |
| # _meta['writer'] += ' / ' | |
| # _meta['writer'] += '%s: %s' % (crew['job'], crew['name']) | |
| # if 'trailers' in meta and meta['trailers']: | |
| # if 'youtube' in meta['trailers']: | |
| # trailers = '' | |
| # for t in meta['trailers']['youtube']: | |
| # if t['type'] == 'Trailer': | |
| # trailers = self.URL_TRAILER % t['source'] | |
| # if trailers: | |
| # _meta['trailer'] = trailers | |
| # elif 'videos' in meta and meta['videos']: | |
| # if 'results' in meta['videos']: | |
| # trailers = '' | |
| # for t in meta['videos']['results']: | |
| # if t['type'] == 'Trailer' and t['site'] == 'YouTube': | |
| # trailers = self.URL_TRAILER % t['key'] | |
| # if trailers: | |
| # _meta['trailer'] = trailers | |
| # return _meta | |
| def search_term(self, mediaType, name, page=1): | |
| if mediaType not in ["movie", "tvshow", "person"]: return | |
| urlType = mediaType if not mediaType == 'tvshow' else 'tv' | |
| try: | |
| meta = self._call('search/'+ urlType, 'query=' + name + '&page=' + str(page)) | |
| if 'errors' in meta and 'status_code' in meta: return [], 0 | |
| elif 'total_results' in meta and meta['total_results'] == 0: return [], 0 | |
| else: | |
| list = [] | |
| if urlType == 'person': | |
| for i in meta['results']: | |
| if i['known_for_department'] != "Acting": continue | |
| poster = self.poster + str(i['profile_path']) if i['profile_path'] != None else None | |
| popularity = int(str(i['popularity']).replace('.','')) | |
| list.append({'id': i['id'], 'name': i['name'], "poster": poster, 'popularity': popularity}) | |
| else: | |
| for i in meta['results']: | |
| try: | |
| if i['genre_ids']: | |
| if self.searchDoku == 'false' and 99 in i['genre_ids']: continue | |
| list.append(i['id']) | |
| except: | |
| pass | |
| return list, meta['total_pages'] | |
| except: | |
| return | |
| def search_credits(self, Type, id): | |
| # https://developers.themoviedb.org/3/people/get-person-combined-credits | |
| if Type not in ["combined_credits", "tv_credits", "movie_credits"]: return | |
| meta = self._call('person/' + str(id) + '/' + Type) | |
| meta = meta['cast'] | |
| #meta = self._formatSuper(meta['cast'], '') | |
| list = [] | |
| for i in meta: | |
| if i['genre_ids'] and self.searchDoku == 'false' and 99 in i['genre_ids']: continue | |
| if i['character'] and any(w in i['character'] for w in ('voice', 'rumored', 'uncredited')): continue  # ('voice' or ...) only ever tested 'voice' | |
| i.update({'popularity':int(str(i['popularity']).replace('.',''))}) | |
| if Type == "movie_credits": i.update({'mediatype': 'movie'}) | |
| elif Type == "tv_credits": i.update({'mediatype': 'tvshow'}) | |
| list.append(i) | |
| return list | |
| def _formatSuper(self, meta, name): | |
| try: | |
| _meta = {} | |
| # ID | |
| # if meta['id'] == 479455: | |
| if 'id' in meta: | |
| _meta['tmdb_id'] = str(meta['id']) | |
| if 'external_ids' in meta: | |
| if meta['external_ids']['imdb_id']: _meta['imdbnumber'] = meta['external_ids']['imdb_id'] | |
| if meta['external_ids']['tvdb_id']: _meta['tvdb_id'] = str(meta['external_ids']['tvdb_id']) | |
| if not 'imdbnumber' in _meta and 'imdb_id' in meta: _meta['imdbnumber'] = meta['imdb_id'] | |
| if 'imdbnumber' in _meta: _meta['imdb_id'] = _meta['imdbnumber'] | |
| try: | |
| _meta['mediatype'] = meta['mediatype'] if 'mediatype' in meta else None | |
| if not _meta['mediatype'] and 'media_type' in meta: _meta['mediatype'] = meta['media_type'] | |
| except: | |
| print(meta.get('media_type')) | |
| pass | |
| if 'backdrop_path' in meta and meta['backdrop_path']: | |
| _meta['fanart'] = self.fanart + str(meta['backdrop_path']) | |
| _meta['backdrop_url'] = self.fanart + str(meta['backdrop_path']) | |
| if 'original_language' in meta and meta['original_language']: | |
| _meta['originallanguage'] = self.getLanguage(meta['original_language']) | |
| if 'original_title' in meta and meta['original_title']: | |
| _meta['originaltitle'] = meta['original_title'] | |
| elif 'original_name' in meta and meta['original_name']: | |
| _meta['originaltitle'] = meta['original_name'] | |
| if 'title' in meta and meta['title']: | |
| _meta['title'] = meta['title'] | |
| elif 'name' in meta and meta['name']: | |
| _meta['title'] = meta['name'] | |
| else: | |
| _meta['title'] = _meta['originaltitle'] | |
| # if _meta['tmdb_id'] == '48866': | |
| # import pydevd | |
| # pydevd.settrace('localhost', port=12345, stdoutToServer=True, stderrToServer=True) | |
| # if 'overview' in meta and len(meta['overview'].strip()) > 5 : | |
| # _meta['plot'] = meta['overview'] | |
| # else: | |
| urlType = _meta['mediatype'] if _meta['mediatype'] == 'movie' else 'tv' | |
| overviews = self._call(urlType + '/' + str(_meta['tmdb_id']) + '/translations') | |
| # overview = overviews['translations'][0]['data']['overview'] | |
| if len(overviews['translations']) > 0: | |
| overviews = overviews['translations'] | |
| for overview in overviews: | |
| if overview['name'] == "Deutsch" or overview['iso_639_1'] == "de": # or overview['name'] == "English": | |
| _meta.update({'plot': overview['data']['overview']}) | |
| break | |
| elif 'plot' not in _meta and overview['name'] == "English":  # _meta never carries an 'overview' key, so that check was dead | |
| _meta.update({'plot': overview['data']['overview']}) | |
| if not 'plot' in _meta: | |
| if 'overview' in meta and len(meta['overview'].strip()) > 5: | |
| _meta['plot'] = meta['overview'] | |
| else: | |
| _meta['plot'] = '' | |
| if 'poster_path' in meta and meta['poster_path']: | |
| _meta['poster'] = self.poster + str(meta['poster_path']) | |
| _meta['cover_url'] = self.poster + str(meta['poster_path']) | |
| if 'release_date' in meta and meta['release_date']: | |
| _meta['premiered'] = meta['release_date'] | |
| elif 'first_air_date' in meta and meta['first_air_date']: | |
| _meta['premiered'] = meta['first_air_date'] | |
| if 'premiered' in _meta and _meta['premiered'] and len(_meta['premiered']) == 10: | |
| _meta['year'] = int(_meta['premiered'][:4]) | |
| if 'budget' in meta and meta['budget']: | |
| _meta['budget'] = "{:,} $".format(meta['budget']) | |
| if 'revenue' in meta and meta['revenue']: | |
| _meta['revenue'] = "{:,} $".format(meta['revenue']) | |
| if 'status' in meta and meta['status']: | |
| _meta['status'] = meta['status'] | |
| duration = 0 | |
| if 'runtime' in meta and meta['runtime']: | |
| duration = int(meta['runtime']) | |
| elif 'episode_run_time' in meta and meta['episode_run_time']: | |
| duration = int(meta['episode_run_time'][0]) | |
| if duration < 300: | |
| duration *= 60 | |
| if duration > 1: | |
| _meta['duration'] = duration | |
| if 'tagline' in meta and meta['tagline']: | |
| _meta['tagline'] = meta['tagline'] | |
| if 'vote_average' in meta and meta['vote_average']: | |
| _meta['rating'] = meta['vote_average'] | |
| if 'vote_count' in meta and meta['vote_count']: | |
| _meta['votes'] = meta['vote_count'] | |
| _meta['genre'] = () | |
| if 'genres' in meta and meta['genres']: | |
| for genre in meta['genres']: | |
| if 'name' in genre and genre['name']: | |
| _meta['genre'] = _meta['genre'] + (genre['name'],) | |
| elif 'genre_ids' in meta and meta['genre_ids']: | |
| _meta['genre_ids'] = meta['genre_ids'] | |
| genres = self.getGenresFromIDs(meta['genre_ids']) | |
| for genre in genres: | |
| _meta['genre'] = _meta['genre'] + (genre,) | |
| if 'production_companies' in meta and meta['production_companies']: | |
| _meta['studio'] = () | |
| for studio in meta['production_companies']: | |
| _meta['studio'] = _meta['studio'] + (studio['name'],) | |
| if 'production_countries' in meta and meta['production_countries']: | |
| _meta['country'] = () | |
| for country in meta['production_countries']: | |
| _meta['country'] = _meta['country'] + (country['name'],) | |
| if 'credits' in meta and meta['credits']: | |
| # crews = [] | |
| # casts = [] | |
| listCredits = meta['credits']  # already a dict from json.loads - the str()/eval() round trip was redundant and unsafe | |
| _meta['writer'] = () | |
| _meta['director'] = () | |
| if 'crew' in listCredits and len(listCredits['crew']) > 0: | |
| crews = listCredits['crew'] | |
| for crew in crews: | |
| if crew['job'] == 'Director': | |
| _meta['director'] = _meta['director'] + (crew['name'],) | |
| elif crew['department'] == 'Writing': | |
| _meta['writer'] = _meta['writer'] + ('%s: %s' % (crew['job'], crew['name']),) | |
| elif crew['department'] == 'Production' and 'Producer' in crew['job']: | |
| _meta['writer'] = _meta['writer'] + ('%s: %s' % (crew['job'], crew['name']),) | |
| if 'cast' in listCredits and len(listCredits['cast']) > 0: | |
| licast = [] | |
| casts = listCredits['cast'] | |
| for cast in casts: | |
| # licast.append((cast['name'], cast['character'], self.poster + str(cast['profile_path']), str(cast['id']))) | |
| licast.append( | |
| ## {"name": cast['name'], "role": cast['character'], "thumbnail": self.poster + str(cast['profile_path']), 'order': str(cast['order'])} | |
| {"name": cast['name'], "role": cast['character'], "thumbnail": self.poster + str(cast['profile_path']), 'order': int(cast['order'])} | |
| ) | |
| _meta['cast'] = licast | |
| # if len(casts) > 0 and len(crews) > 0: | |
| # _meta['credits'] = "{'cast': " + str(casts) + ", 'crew': " + str(crews) + "}" | |
| # elif len(casts) > 0: | |
| # _meta['credits'] = "{'cast': " + str(casts) + '}' | |
| # elif len(crews) > 0: | |
| # _meta['credits'] = "{'crew': " + str(crews) + '}' | |
| # if 'trailers' in meta and meta['trailers']: | |
| # if 'youtube' in meta['trailers']: | |
| # trailers = '' | |
| # for t in meta['trailers']['youtube']: | |
| # if t['type'] == 'Trailer': | |
| # trailers = self.URL_TRAILER % t['source'] | |
| # if trailers: | |
| # _meta['trailer'] = trailers | |
| # elif 'videos' in meta and meta['videos']: | |
| # if 'results' in meta['videos']: | |
| # trailers = '' | |
| # for t in meta['videos']['results']: | |
| # if t['type'] == 'Trailer' and t['site'] == 'YouTube': | |
| # trailers = self.URL_TRAILER % t['key'] | |
| # if trailers: | |
| # _meta['trailer'] = trailers | |
| if 'number_of_seasons' in meta and meta['number_of_seasons']: | |
| _meta['number_of_seasons'] = meta['number_of_seasons'] | |
| if 'alternative_titles' in meta and meta['alternative_titles']: | |
| titles = 'titles' if _meta['mediatype'] == 'movie' else 'results' | |
| listAliases = meta['alternative_titles']  # already a dict - str()/eval() round trip removed | |
| origin_country = str(meta['origin_country'][0]) if 'origin_country' in meta else '' | |
| if len(listAliases[titles]) > 0: | |
| lialiases = listAliases[titles] | |
| # aliases = [i['title'] for i in lialiases if i['iso_3166_1'] in ['DE', 'US', 'EN', 'AT', 'CN', origin_country]] | |
| aliases = [i['title'] for i in lialiases if i['iso_3166_1'] in ['DE', 'US', 'EN', 'AT', origin_country]] | |
| _meta['aliases'] = aliases | |
| return _meta | |
| except Exception as e: | |
| print(e) | |
| pass |
| # 2022-04-10 | |
| # edit | |
| import xbmcgui | |
| import time | |
| from resources.lib.ParameterHandler import ParameterHandler | |
| from resources.lib import control | |
| #from resources.lib import pyaes | |
| import re, hashlib, sys, xbmc | |
| try: | |
| from urlparse import urlparse | |
| from htmlentitydefs import name2codepoint | |
| from urllib import quote, unquote, quote_plus, unquote_plus | |
| except ImportError: | |
| from urllib.parse import quote, unquote, quote_plus, unquote_plus, urlparse | |
| from html.entities import name2codepoint | |
| class cParser: | |
| @staticmethod | |
| def parseSingleResult(sHtmlContent, pattern): | |
| aMatches = None | |
| if sHtmlContent: | |
| aMatches = re.compile(pattern).findall(sHtmlContent) | |
| if len(aMatches) == 1: | |
| aMatches[0] = cParser.replaceSpecialCharacters(aMatches[0]) | |
| return True, aMatches[0] | |
| return False, aMatches | |
| @staticmethod | |
| def replaceSpecialCharacters(s): | |
| for t in (('\\/', '/'), ('&amp;', '&'), ('\\u00c4', 'Ä'), ('\\u00e4', 'ä'), | |
| ('\\u00d6', 'Ö'), ('\\u00f6', 'ö'), ('\\u00dc', 'Ü'), ('\\u00fc', 'ü'), | |
| ('\\u00df', 'ß'), ('\\u2013', '-'), ('\\u00b2', '²'), ('\\u00b3', '³'), | |
| ('\\u00e9', 'é'), ('\\u2018', '‘'), ('\\u201e', '„'), ('\\u201c', '“'), | |
| ('\\u00c9', 'É'), ('\\u2026', '...'), ('\\u202fh', 'h'), ('\\u2019', '’'), | |
| ('\\u0308', '̈'), ('\\u00e8', 'è'), ('#038;', ''), ('\\u00f8', 'ø'), | |
| ('&#47;', '/'), ('\\u00e1', 'á'), ('&#8211;', '-'), ('&#8220;', '“'), ('&#8222;', '„'), | |
| ('&#8217;', '’'), ('&#8230;', '…'), ('&#039;', "'")): | |
| try: | |
| s = s.replace(*t) | |
| except: | |
| pass | |
| try: | |
| # assign the results - str is immutable, so the bare re.sub calls were no-ops | |
| s = re.sub(u'é', 'é', s) | |
| s = re.sub(u'É', 'É', s) | |
| # the former r'[^\W\d_]' "kill all other unicode chars" sub was never assigned | |
| # either; applying it would strip every letter, so it stays dropped | |
| except: | |
| pass | |
| return s | |
| @staticmethod | |
| def parse(sHtmlContent, pattern, iMinFoundValue=1, ignoreCase=False): | |
| aMatches = None | |
| if sHtmlContent: | |
| sHtmlContent = cParser.replaceSpecialCharacters(sHtmlContent) | |
| if ignoreCase: | |
| aMatches = re.compile(pattern, re.DOTALL | re.I).findall(sHtmlContent) | |
| else: | |
| aMatches = re.compile(pattern, re.DOTALL).findall(sHtmlContent) | |
| if len(aMatches) >= iMinFoundValue: | |
| return True, aMatches | |
| return False, aMatches | |
| @staticmethod | |
| def replace(pattern, sReplaceString, sValue): | |
| return re.sub(pattern, sReplaceString, sValue) | |
| @staticmethod | |
| def search(sSearch, sValue): | |
| return re.search(sSearch, sValue, re.IGNORECASE) | |
| @staticmethod | |
| def escape(sValue): | |
| return re.escape(sValue) | |
| @staticmethod | |
| def getNumberFromString(sValue): | |
| pattern = r'\d+' | |
| aMatches = re.findall(pattern, sValue) | |
| if len(aMatches) > 0: | |
| return int(aMatches[0]) | |
| return 0 | |
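| # e.g. getNumberFromString('Staffel 3 Folge 7') -> 3 (first digit run only) | |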
| @staticmethod | |
| def urlparse(sUrl): | |
| return urlparse(sUrl.replace('www.', '')).netloc.title() | |
| @staticmethod | |
| def urlDecode(sUrl): | |
| return unquote(sUrl) | |
| @staticmethod | |
| def urlEncode(sUrl, safe=''): | |
| return quote(sUrl, safe) | |
| @staticmethod | |
| def unquotePlus(sUrl): | |
| return unquote_plus(sUrl) | |
| @staticmethod | |
| def quotePlus(sUrl): | |
| return quote_plus(sUrl) | |
| @staticmethod | |
| def B64decode(text): | |
| import base64 | |
| if sys.version_info[0] == 2: | |
| b = base64.b64decode(text) | |
| else: | |
| b = base64.b64decode(text).decode('utf-8') | |
| return b | |
| class logger: | |
| @staticmethod | |
| def info(sInfo): | |
| if sys.version_info[0] == 2: | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGNOTICE) | |
| else: | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGINFO) | |
| @staticmethod | |
| def warning(sInfo): | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGWARNING) | |
| @staticmethod | |
| def debug(sInfo): | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGDEBUG) | |
| @staticmethod | |
| def error(sInfo): | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGERROR) | |
| @staticmethod | |
| def fatal(sInfo): | |
| logger.__writeLog(sInfo, cLogLevel=xbmc.LOGFATAL) | |
| @staticmethod | |
| def __writeLog(sLog, cLogLevel=xbmc.LOGDEBUG): | |
| params = ParameterHandler() | |
| try: | |
| if sys.version_info[0] == 2: | |
| if isinstance(sLog, unicode): | |
| sLog = '%s (ENCODED)' % (sLog.encode('utf-8')) | |
| if params.exist('site'): | |
| site = params.getValue('site') | |
| sLog = "\t[%s] -> %s: %s" % (control.addonName, site, sLog) | |
| else: | |
| sLog = "\t[%s] %s" % (control.addonName, sLog) | |
| xbmc.log(sLog, cLogLevel) | |
| except Exception as e: | |
| xbmc.log('Logging Failure: %s' % e, cLogLevel) | |
| pass | |
| # class cUtil: | |
| # @staticmethod | |
| # def removeHtmlTags(sValue, sReplace=''): | |
| # p = re.compile(r'<.*?>') | |
| # return p.sub(sReplace, sValue) | |
| # | |
| # @staticmethod | |
| # def unescape(text): | |
| # def fixup(m): | |
| # text = m.group(0) | |
| # if not text.endswith(';'): text += ';' | |
| # if text[:2] == '&#': | |
| # try: | |
| # if text[:3] == '&#x': | |
| # return unichr(int(text[3:-1], 16)) | |
| # else: | |
| # return unichr(int(text[2:-1])) | |
| # except ValueError: | |
| # pass | |
| # else: | |
| # try: | |
| # text = unichr(name2codepoint[text[1:-1]]) | |
| # except KeyError: | |
| # pass | |
| # return text | |
| # | |
| # if isinstance(text, str): | |
| # try: | |
| # text = text.decode('utf-8') | |
| # except Exception: | |
| # try: | |
| # text = text.decode('utf-8', 'ignore') | |
| # except Exception: | |
| # pass | |
| # return re.sub("&(\\w+;|#x?\\d+;?)", fixup, text.strip()) | |
| # | |
| # @staticmethod | |
| # def cleanse_text(text): | |
| # if text is None: text = '' | |
| # text = cUtil.removeHtmlTags(text) | |
| # if sys.version_info[0] == 2: | |
| # text = cUtil.unescape(text) | |
| # if isinstance(text, unicode): | |
| # text = text.encode('utf-8') | |
| # | |
| # text = text.replace('\\xc3\\x84', 'Ä').replace('\\xc3\\xa4', 'ä') | |
| # text = text.replace('\\xc3\\x96', 'Ö').replace('\\xc3\\xb6', 'ö') | |
| # text = text.replace('\\xc3\\x9c', 'Ü').replace('\\xc3\\xbc', 'ü') | |
| # text = text.replace('\\xc3\\x9f', 'ß').replace("\\'", "'") | |
| # | |
| # return text | |
| # | |
| # @staticmethod | |
| # def evp_decode(cipher_text, passphrase, salt=None): | |
| # if not salt: | |
| # salt = cipher_text[8:16] | |
| # cipher_text = cipher_text[16:] | |
| # key, iv = cUtil.evpKDF(passphrase, salt) | |
| # decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv)) | |
| # plain_text = decrypter.feed(cipher_text) | |
| # plain_text += decrypter.feed() | |
| # return plain_text.decode("utf-8") | |
| # | |
| # @staticmethod | |
| # def evpKDF(pwd, salt, key_size=32, iv_size=16): | |
| # temp = b'' | |
| # fd = temp | |
| # while len(fd) < key_size + iv_size: | |
| # h = hashlib.md5() | |
| # h.update(temp + pwd + salt) | |
| # temp = h.digest() | |
| # fd += temp | |
| # key = fd[0:key_size] | |
| # iv = fd[key_size:key_size + iv_size] | |
| # return key, iv | |
| # class cCache(object): | |
| # _win = None | |
| # def __init__(self): | |
| # # see https://kodi.wiki/view/Window_IDs | |
| # # use WINDOW_SCREEN_CALIBRATION to store all data | |
| # self._win = xbmcgui.Window(10011) | |
| # | |
| # def __del__(self): | |
| # del self._win | |
| # | |
| # def get(self, key, cache_time): | |
| # cachedata = self._win.getProperty(key) | |
| # | |
| # if cachedata: | |
| # cachedata = eval(cachedata) | |
| # if time.time() - cachedata[0] < cache_time: | |
| # return cachedata[1] | |
| # else: | |
| # self._win.clearProperty(key) | |
| # | |
| # return None | |
| # | |
| # def set(self, key, data): | |
| # self._win.setProperty(key, repr((time.time(), data))) | |
| # | |
| # def clear(self): | |
| # self._win.clearProperties() |
| # edit 2025-07-14 | |
| # grab bag of helper functions | |
| import json, os, re | |
| import unicodedata | |
| import requests | |
| import xbmcvfs | |
| from resources.lib.control import urlparse, showparentdiritems, currentWindowId, getInfoLabel, sleep, getSetting, urlretrieve, quote_plus, progressDialog | |
| from six.moves import urllib_error, urllib_request, urllib_parse | |
| from operator import itemgetter | |
| from functools import cmp_to_key | |
| from resources.lib import log_utils | |
| def getHostDict(): | |
| hostblockDict = ['flashx', 'streamlare', 'evoload', 'drop.download'] # permanently blocked | |
| blockedHoster = getSetting('hosts.filter').split(',') # user blocklist from settings.xml | |
| if len(blockedHoster) <= 1: blockedHoster = getSetting('hosts.filter').split() | |
| for i in blockedHoster: hostblockDict.append(i.lower()) | |
| return hostblockDict | |
| def isBlockedHoster(url, isResolve=True): | |
| import html | |
| import resolveurl as resolver | |
| from resources.lib import log_utils | |
| requests.packages.urllib3.disable_warnings() | |
| from resources.lib.requestHandler import cRequestHandler | |
| if url.startswith("//"): url = 'http:%s' % url | |
| if urlparse(url).hostname and urlparse(url).scheme: | |
| UA = cRequestHandler.RandomUA() | |
| headers = { | |
| "referer": urlparse(url).scheme +'://' + urlparse(url).hostname + '/', | |
| "user-agent": UA, | |
| } | |
| try: | |
| r = requests.head(url, verify=False, headers=headers, timeout=3) | |
| except: | |
| sDomain = urlparse(url).path if urlparse(url).hostname == None else urlparse(url).hostname | |
| return True, sDomain, url, 100 | |
| status_code = r.status_code | |
| if 300 <= status_code < 400:  # redirects only - 400 itself is a client error | |
| url = r.headers['Location'] | |
| ## TODO moflix, fileions etc 404 | |
| # elif status_code != 200: | |
| # sDomain = urlparse(url).path if urlparse(url).hostname == None else urlparse(url).hostname | |
| # return True, sDomain, url, 100 | |
| sDomain = urlparse(url).path if urlparse(url).hostname == None else urlparse(url).hostname | |
| hostblockDict = getHostDict() | |
| prioHoster = 100 | |
| for i in hostblockDict: | |
| if i in sDomain.lower() or i.split('.')[0] in sDomain.lower(): return True, sDomain, url, prioHoster | |
| if isResolve: | |
| try: | |
| url = html.unescape(url) # https://github.com/Gujal00/ResolveURL/pull/1115 | |
| hmf = resolver.HostedMediaFile(url=url, include_disabled=True, include_universal=False) | |
| if hmf.valid_url(): | |
| sUrl = hmf.resolve() | |
| try: prioHoster = hmf._HostedMediaFile__resolvers[0].priority | |
| except: pass | |
| return False, sDomain, sUrl, prioHoster | |
| else: | |
| log_utils.log('resolveurl has no resolver for url %s' % url, log_utils.LOGWARNING) | |
| return True, sDomain, url, prioHoster | |
| except: | |
| return True, sDomain, url, prioHoster | |
| else: | |
| status = resolver.relevant_resolvers(domain=sDomain) | |
| if status == []: return True, sDomain, url, prioHoster | |
| else: | |
| prioHoster = status[0].priority | |
| return False, sDomain, url, prioHoster | |
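| # return contract: (isBlocked, domain, url, prioHoster); blocked or | |
| # unresolvable links come back True with prioHoster 100 so callers can skip | |
| # them or sort them last | |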
| # elif checkResolver: # verify via resolveurl | |
| # if resolver.relevant_resolvers(domain=sDomain) == []: # sDomain not known to resolveurl | |
| # log_utils.log('resolveurl has no resolver for url %s' % url, log_utils.LOGWARNING) | |
| # return True, sDomain, prioHoster | |
| # return False, sDomain, prioHoster | |
| def cmp(x, y): | |
| """ | |
| Replacement for built-in function cmp that was removed in Python 3 | |
| Compare the two objects x and y and return an integer according to | |
| the outcome. The return value is negative if x < y, zero if x == y | |
| and strictly positive if x > y. | |
| https://portingguide.readthedocs.io/en/latest/comparisons.html#the-cmp-function | |
| """ | |
| return (x > y) - (x < y) | |
| def multikeysort(items, columns): | |
| # a = multikeysort(b, ['-column1', 'column2']) # '-' prefix = reverse / b is e.g. self.list | |
| comparers = [ | |
| ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) | |
| for col in columns | |
| ] | |
| def comparer(left, right): | |
| comparer_iter = ( | |
| cmp(fn(left), fn(right)) * mult | |
| for fn, mult in comparers | |
| ) | |
| return next((result for result in comparer_iter if result), 0) | |
| return sorted(items, key=cmp_to_key(comparer)) | |
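| # e.g. multikeysort(rows, ['-popularity', 'title']) sorts dicts by popularity | |
| # descending, then title ascending, chaining cmp() results via cmp_to_key | |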
| def getExtIDS(imdb, type): # get external IDS | |
| # V2_API_KEY = getSetting('api.trakt').strip() | |
| # e:\__DEV-19\devkodi\addons\metadata.themoviedb.org.python\python\lib\tmdbscraper\ | |
| V2_API_KEY = '5f2dc73b6b11c2ac212f5d8b4ec8f3dc4b727bb3f026cd254d89eda997fe64ae' # 4a65e1e644af74c98f9f2b3884669deb3fac9531ee71f39babf1dee46d264d17 | |
| headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': '2'} | |
| url = 'https://api.trakt.tv/{0}/{1}/?extended=full'.format(type, imdb) | |
| result = requests.get(url, headers=headers) | |
| if result.status_code == 200: | |
| result = json.loads(result.content) | |
| return result['ids'] | |
| else: | |
| return {}  # match the dict returned on success instead of a ([], '') tuple | |
| def getAliases(imdb, type): | |
| # V2_API_KEY = getSetting('api.trakt').strip() | |
| V2_API_KEY = '5f2dc73b6b11c2ac212f5d8b4ec8f3dc4b727bb3f026cd254d89eda997fe64ae' | |
| headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': '2'} | |
| aliasesUrl = 'https://api.trakt.tv/{0}/{1}/aliases'.format(type, imdb) | |
| result = requests.get(aliasesUrl, headers=headers) | |
| if result.status_code == 200: | |
| result = json.loads(result.content) | |
| localtitle = [i['title'] for i in result if i['country'] in ['de']] | |
| localtitle = localtitle[0] if any(localtitle) else None | |
| return [i['title'] for i in result if i['country'] in ['de', 'us', 'en', 'at', '']], localtitle | |
| else: | |
| return [], '' | |
| def aliases_to_array(aliases, filter=None): | |
| try: | |
| if not filter: | |
| filter = [] | |
| if isinstance(filter, type(u"")): | |
| filter = [filter] | |
| return [x.get('title') for x in aliases if not filter or x.get('country') in filter] | |
| except: | |
| return [] | |
| def getsearch(title): | |
| if title is None: | |
| return | |
| title = title.lower() | |
| title = re.sub(r'&#(\d+);', '', title) | |
| title = re.sub(r'(&#[0-9]+)([^;^0-9]+)', r'\1;\2', title) | |
| title = title.replace('"', '\"').replace('&', '&') | |
| # title = re.sub('\\\|/|-|â€"|:|;|\*|\?|"|\'|<|>|\|', '', title).lower() | |
| title = re.sub(r'[\\/\-â€":;*?"\'<>|]', '', title).lower() | |
| title = re.sub(r'\s+', ' ', title) | |
| return title | |
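| # e.g. getsearch('Mission: Impossible - Fallout') -> 'mission impossible fallout' | |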
| def get_titles_for_search(localtitle, title, aliases): | |
| titles = [] | |
| try: | |
| if "country':" in str(aliases): aliases = aliases_to_array(aliases) | |
| if localtitle != '': | |
| localtitle = localtitle.lower() | |
| titles.append(localtitle) | |
| titles.append(getsearch(localtitle)) | |
| if title != '': | |
| title = title.lower() | |
| if localtitle != title: | |
| titles.append(title) | |
| titles.append(getsearch(title)) | |
| for i in aliases: | |
| try: | |
| #if str(i).lower() != title and str(i).lower() != localtitle and i != '' : | |
| if not str(i).lower() in titles: | |
| titles.append(str(i).lower()) | |
| j = getsearch(str(i)) | |
| if not j.lower() in titles: | |
| titles.append(j) | |
| except: | |
| pass | |
| #titles = [str(i) for i in titles if all(ord(c) < 128 for c in i)] | |
| titles = [item for i, item in enumerate(titles) if item not in titles[:i]] | |
| titles = more_titles(titles) | |
| return titles | |
| except: | |
| return titles | |
| #TODO | |
| # def title_article(titles): | |
| # try: | |
| # articles_en = ['the'] # ['the', 'a', 'an'] | |
| # articles_de = ['die', 'der'] # ['der', 'die', 'das'] | |
| # for title in titles: | |
| # match = re.match('^((\w+)\s+)', title.lower()) | |
| # if match and match.group(2) in articles_en: | |
| # for i in articles_de: | |
| # title = title.replace(title[:3], i) | |
| # if title not in titles: titles.append(title) | |
| # return titles | |
| # except: | |
| # return titles | |
| def more_titles(titles): | |
| for i in titles: | |
| temp = _titleclean(i) | |
| if temp and temp not in titles: | |
| titles.append(temp) | |
| return titles | |
| def _titleclean(title): | |
| try: | |
| # str.replace returns a new string; without assignment every branch was a no-op | |
| if 'IV' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' IV', ' 4') | |
| elif 'VI' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' VI', ' 6') | |
| elif 'V' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' V', ' 5') | |
| elif 'III' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' III', ' 3') | |
| elif 'II' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' II', ' 2') | |
| # elif 'I' == title.rsplit(' ', 1)[1]: | |
| # title = title.replace('I', '1') | |
| elif '2' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' 2', ' II') | |
| elif '3' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' 3', ' III') | |
| elif '4' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' 4', ' IV') | |
| elif '5' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' 5', ' V') | |
| elif '6' == title.rsplit(' ', 1)[1]: | |
| title = title.replace(' 6', ' VI') | |
| return title | |
| except: | |
| pass | |
| def check_302(url, headers=None): | |
| if headers is None: headers = {}  # avoid the shared mutable default dict | |
| try: | |
| while True: | |
| host = urlparse(url).netloc | |
| headers.update({'Host': host}) | |
| r = requests.get(url, allow_redirects=False, headers=headers, timeout=7) | |
| if 300 <= r.status_code < 400: | |
| url = r.headers['Location'] | |
| elif 400 <= r.status_code: | |
| return | |
| elif 200 == r.status_code: | |
| return url | |
| elif 300 > r.status_code: | |
| return url | |
| else: | |
| break | |
| return | |
| except: | |
| return | |
| def test_stream(stream_url): | |
| """ | |
| Returns True if the stream_url gets a non-failure http status (i.e. <400) back from the server | |
| otherwise return False | |
| Intended to catch stream urls returned by resolvers that would fail to playback | |
| """ | |
| # parse_qsl doesn't work because it splits elements by ';' which can be in a non-quoted UA | |
| try: | |
| headers = dict([item.split('=') for item in (stream_url.split('|')[1]).split('&')]) | |
| except: | |
| headers = {} | |
| for header in headers: | |
| headers[header] = urllib_parse.unquote_plus(headers[header]) | |
| log_utils.log('Setting Headers on UrlOpen: %s' % headers, log_utils.LOGDEBUG) | |
| import ssl | |
| try: | |
| #- reject stream urls with invalid certificates | |
| ssl_context = ssl.create_default_context() | |
| #ssl_context.check_hostname = False | |
| #ssl_context.verify_mode = ssl.CERT_NONE | |
| opener = urllib_request.build_opener(urllib_request.HTTPSHandler(context=ssl_context)) | |
| urllib_request.install_opener(opener) | |
| except: | |
| pass | |
| try: | |
| msg = '' | |
| request = urllib_request.Request(stream_url.split('|')[0], headers=headers) | |
| # only do a HEAD request. gujal | |
| request.get_method = lambda: 'HEAD' | |
| # set urlopen timeout to 15 seconds | |
| http_code = urllib_request.urlopen(request, timeout=15).getcode() | |
| except urllib_error.HTTPError as e: | |
| # the isinstance check was redundant inside this except clause | |
| http_code = e.code | |
| if http_code == 405: | |
| http_code = 200 | |
| except urllib_error.URLError as e: | |
| http_code = 500 | |
| if hasattr(e, 'reason'): | |
| # treat an unhandled url type as success | |
| if 'unknown url type' in str(e.reason).lower(): | |
| return True | |
| elif 'certificate verify failed' in str(e.reason).lower(): | |
| return True | |
| else: | |
| msg = e.reason | |
| if not msg: | |
| msg = str(e) | |
| except Exception as e: | |
| http_code = 601 | |
| msg = str(e) | |
| if msg == "''": | |
| http_code = 504 | |
| # added this log line for now so that we can catch any logs on streams that are rejected due to test_stream failures | |
| # we can remove it once we are sure this works reliably | |
| if int(http_code) >= 400 and int(http_code) != 504: | |
| log_utils.log('Stream UrlOpen Failed: Url: %s \n HTTP Code: %s Msg: %s' % (stream_url, http_code, msg), log_utils.LOGWARNING) | |
| return int(http_code) < 400 or int(http_code) == 504 | |
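| # Illustrative input (hypothetical URL; headers ride after '|' as url-encoded key=value pairs): | |
| #   test_stream('https://example.com/v.mp4|User-Agent=Mozilla%2F5.0&Referer=https%3A%2F%2Fexample.com%2F') | |
| #   # -> True for HTTP < 400, and also for 405 (HEAD refused) and 504 (treated as playable) | |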
| #TODO | |
| def m3u8_check(stream_url): | |
| if '.m3u8' not in stream_url.split('|')[0].lower(): return stream_url | |
| try: | |
| headers = dict([item.split('=', 1) for item in (stream_url.split('|')[1]).split('&')]) | |
| except: | |
| headers = {} | |
| for header in headers: | |
| headers[header] = urllib_parse.unquote_plus(headers[header]) | |
| req = urllib_request.Request(stream_url.split('|')[0], headers=headers) | |
| try: | |
| lines = urllib_request.urlopen(req).readlines() | |
| if re.search(r'\.m4.', str(lines)): return  # reject playlists that reference .m4* media segments | |
| # if '.m4s' in str(lines): | |
| # return | |
| # elif 'http' in str(lines): | |
| # return stream_url | |
| else: | |
| return stream_url # new_m3u8(req, stream_url.split('|')[0]) | |
| except urllib_error.URLError as e: | |
| if hasattr(e, 'reason') and 'certificate verify failed' in str(e.reason).lower(): | |
| return stream_url | |
| return | |
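| # Illustrative input (hypothetical URL, same '|' header convention as above): | |
| #   m3u8_check('https://example.com/master.m3u8|User-Agent=Mozilla%2F5.0') | |
| #   # -> the URL unchanged, or None if the playlist references .m4* segments | |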
| #TODO | |
| # def new_m3u8(req, url): | |
| # import xbmcvfs | |
| # new_m3u8_file = os.path.join(dataPath, 'temp.m3u8') | |
| # loc_playlist = os.path.join(dataPath, 'myPlaylist.m3u') | |
| # # scheme, netloc, path, query, frag = parse.urlsplit(url) | |
| # http_scheme = urllib_parse.urlparse(url).scheme | |
| # host = urllib_parse.urlparse(url).netloc | |
| # url_path = os.path.split(url)[0] + '/' | |
| # url_file = os.path.split(url)[1] | |
| # base_url = '%s://%s' % (http_scheme, host) | |
| # | |
| # data = '' | |
| # for line in urllib_request.urlopen(req).readlines(): | |
| # line = line.strip() | |
| # #a = line.decode("utf-8") | |
| # line = convert(line, http_scheme, base_url, url_path) | |
| # if line: | |
| # data = data + ('{0}\n'.format(line)) | |
| # #print(data) | |
| # m3u8 = xbmcvfs.File(new_m3u8_file, 'w') | |
| # m3u8.write(data) | |
| # m3u8.close() | |
| # if not xbmcvfs.exists(loc_playlist): | |
| # #L = "#EXTM3U \n#EXT-X-VERSION:3\n#EXT-X-STREAM-INF:PROGRAM-ID=1\n%s\n"] % str(new_m3u8_file) | |
| # #L = "#EXTM3U \n#EXTINF:0,temp.m3u8\nfile:///%s\n" % str(new_m3u8_file) | |
| # L = "#EXTM3U \n#EXT-X-VERSION:3\n#EXT-X-STREAM-INF:PROGRAM-ID=1\n%s\n" % str(new_m3u8_file) | |
| # loc_playlist = xbmcvfs.File(loc_playlist, 'w') | |
| # loc_playlist.write(L) | |
| # #loc_playlist.write(str(new_m3u8_file)) | |
| # loc_playlist.close() | |
| # return new_m3u8_file #loc_playlist new_m3u8_file | |
| # | |
| # | |
| # def convert(line, http_scheme, base_url, url_path): | |
| # if line.startswith('#EXT-X-MAP'): | |
| # pattern = '''URI=(?:'|")(.+?)(?:'|")''' | |
| # URI = re.search(pattern, line).group(1) | |
| # if URI.startswith('//'): URI = '%s:%s' % (http_scheme, URI) | |
| # elif URI.startswith('/'): URI = base_url + URI | |
| # elif URI.startswith('http'): return URI | |
| # else: URI = url_path + URI | |
| # return '#EXT-X-MAP:URI="%s"' % URI | |
| # elif line.startswith('#'): return line | |
| # elif line.startswith('http'): return line | |
| # elif line.startswith('//'): return '%s:%s' % (http_scheme, line) | |
| # elif line.startswith('/'): return base_url + line | |
| # else: return url_path + line | |
| def normalize(title): | |
| from sys import version_info | |
| try: | |
| if version_info[0] > 2: return title | |
| else: | |
| # legacy Python 2 branch; assumes unicodedata is imported at module level | |
| try: return title.decode('ascii').encode("utf-8") | |
| except: return str(''.join(c for c in unicodedata.normalize('NFKD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn')) | |
| except: | |
| return title | |
| # def normalize(title): | |
| # import codecs | |
| # try: | |
| # return codecs.decode(title, 'UTF-8') | |
| # except: | |
| # return title | |
| ## sets the selection to the last episode / season marked as watched | |
| def setPosition(pos, _name, content='movies'): # org.: episodes | |
| isdebug = True if getSetting('status.debug') == 'true' else False | |
| win = currentWindowId # win = xbmcgui.Window(xbmcgui.getCurrentWindowId()) | |
| pos = int(pos) | |
| pos_sp = pos if showparentdiritems() else pos - 1 | |
| count = 0 | |
| for count in range(1, 15): | |
| ccont = getInfoLabel("Container.Content") | |
| if ccont == content: break | |
| sleep(100) | |
| if isdebug: | |
| log_utils.log(_name + ' - Container.Content (1) - soll: %s ist: %s count: %s' % (content, getInfoLabel("Container.Content"), count), log_utils.LOGINFO) | |
| log_utils.log(_name + ' - System.CurrentControlID - old: %s ' % getInfoLabel("System.CurrentControlID"), log_utils.LOGINFO) | |
| log_utils.log(_name + ' - pos: %s - check: %s' % (pos, int(getInfoLabel("Container().CurrentItem"))), log_utils.LOGINFO) | |
| # set the position | |
| for count in range(1, 15): | |
| try: | |
| cid = getInfoLabel("System.CurrentControlID") | |
| ctrl = win.getControl(int(cid)) | |
| except: | |
| sleep(200) | |
| continue | |
| ctrl.selectItem(pos_sp) | |
| sleep(100) | |
| check = int(getInfoLabel("Container().CurrentItem")) | |
| if pos == check: break | |
| if isdebug: | |
| log_utils.log(_name + ' - pos: %s - check: %s - count: %s' % (pos, int(getInfoLabel("Container().CurrentItem")),count), log_utils.LOGINFO) | |
| log_utils.log(_name + ' - System.CurrentControlID: %s' % getInfoLabel("System.CurrentControlID"), log_utils.LOGINFO) | |
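| # Illustrative call (hypothetical values): select item 4 in a seasons container | |
| #   setPosition(4, 'myAddonName', content='seasons') | |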
| def getParams(_params): | |
| # In Python 3, exec() cannot create new locals inside a function, so the original | |
| # exec-based assignments were silently lost; write into globals() instead, keeping | |
| # the original fallback (try the value as an expression, else keep it as a string). | |
| for key, value in _params.items(): | |
| try: | |
| globals()[key] = eval(value) | |
| except: | |
| globals()[key] = value | |
| # from here on, functions are also used by xstream | |
| def translatePath(*args): | |
| from sys import version_info | |
| if version_info.major == 2: | |
| from xbmc import translatePath | |
| return translatePath(*args).decode("utf-8") | |
| else: | |
| from xbmcvfs import translatePath | |
| return translatePath(*args) | |
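| # Illustrative mapping (result is platform-dependent): | |
| #   translatePath('special://profile/')  ->  e.g. '/home/<user>/.kodi/userdata/' | |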
| def download_url(url, dest, dp=None): | |
| # download_url(url, src, dp=[None / True / False / Dialog]) | |
| if dp is None or dp is True: | |
| dp = progressDialog | |
| dp.create("URL Downloader", " \n Downloading File: [B]%s[/B]" % url.split('/')[-1]) | |
| elif dp is False: | |
| return urlretrieve(url, dest) | |
| try: | |
| dp.update(0) | |
| urlretrieve(url, dest, lambda nb, bs, fs, url=url: _pbhook(nb, bs, fs, dp)) | |
| dp.close() | |
| except Exception as e: | |
| dp.close() | |
| if 'Canceled' in str(e): | |
| return  # user cancelled inside _pbhook: do not silently restart the download | |
| urlretrieve(url, dest) | |
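| # Sketch of the three dp modes (url/dest hypothetical): | |
| #   download_url(url, dest)               # dp=None/True: opens its own progress dialog | |
| #   download_url(url, dest, dp=False)     # plain urlretrieve, no dialog | |
| #   download_url(url, dest, dp=myDialog)  # caller supplies an already-created dialog | |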
| def _pbhook(numblocks, blocksize, filesize, dp): | |
| try: | |
| percent = min((numblocks * blocksize * 100) / filesize, 100) | |
| dp.update(int(percent)) | |
| except: | |
| percent = 100 | |
| dp.update(percent) | |
| if dp.iscanceled(): | |
| dp.close() | |
| raise Exception("Canceled") | |
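| # Progress math check: 25 blocks of 8192 bytes on an 819200-byte file | |
| #   -> min(25 * 8192 * 100 / 819200, 100) = 25 (percent) | |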
| def unzip_recursive(path, dirs, dest): | |
| for directory in dirs: | |
| dirs_dir = os.path.join(path, directory) | |
| dest_dir = os.path.join(dest, directory) | |
| xbmcvfs.mkdir(dest_dir) | |
| dirs2, files = xbmcvfs.listdir(dirs_dir) | |
| if dirs2: | |
| unzip_recursive(dirs_dir, dirs2, dest_dir) | |
| for file in files: | |
| # unzip_file(os.path.join(dirs_dir, file.decode('utf-8')), os.path.join(dest_dir, file.decode('utf-8'))) | |
| unzip_file(os.path.join(dirs_dir, file), os.path.join(dest_dir, file)) | |
| def unzip_file(path, dest): | |
| ''' Unzip specific file. Path should start with zip:// ''' | |
| xbmcvfs.copy(path, dest) | |
| #LOG.debug("unzip: %s to %s", path, dest) | |
| def unzip(path, dest, folder=None): | |
| ''' Unzip file. zipfile module seems to fail on android with badziperror.''' | |
| path = quote_plus(path) | |
| root = "zip://" + path + '/' | |
| if folder: | |
| xbmcvfs.mkdir(os.path.join(dest, folder)) | |
| dest = os.path.join(dest, folder) | |
| root = get_zip_directory(root, folder) | |
| dirs, files = xbmcvfs.listdir(root) | |
| if dirs: | |
| unzip_recursive(root, dirs, dest) | |
| for file in files: | |
| unzip_file(os.path.join(root, file), os.path.join(dest, file)) | |
| #LOG.warn("Unzipped %s", path) | |
| def get_zip_directory(path, folder): | |
| dirs, files = xbmcvfs.listdir(path) | |
| if folder in dirs: | |
| return os.path.join(path, folder) | |
| for directory in dirs: | |
| result = get_zip_directory(os.path.join(path, directory), folder) | |
| if result: | |
| return result | |
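| # Illustrative call (hypothetical paths): extract repo.zip into the addons folder | |
| #   unzip(translatePath('special://home/temp/repo.zip'), | |
| #         translatePath('special://home/addons/'), folder='plugin.video.example') | |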
| ## this is junk !! | |
| # def remove_dir(path): | |
| # from xbmcvfs import rmdir, listdir, delete | |
| # dirList, flsList = listdir(path) | |
| # for fl in flsList: | |
| # delete(os.path.join(path, fl)) | |
| # for dr in dirList: | |
| # remove_dir(os.path.join(path, dr)) | |
| # ## rmdir(path) # dangerous !!! | |
| def remove_dir(folder): | |
| # empties the folder (files, links and subdirectories); the folder itself and '.idea' are kept | |
| import os, shutil, stat | |
| for filename in os.listdir(folder): | |
| if filename == '.idea': continue | |
| file_path = os.path.join(folder, filename) | |
| try: | |
| if os.path.isfile(file_path) or os.path.islink(file_path): | |
| if os.path.isfile(file_path): os.chmod(file_path, stat.S_IWRITE) | |
| os.unlink(file_path) | |
| elif os.path.isdir(file_path): | |
| shutil.rmtree(file_path) | |
| except Exception as e: | |
| print('Failed to delete %s. Reason: %s' % (file_path, e)) | |
| ## this patch is no longer necessary | |
| def patchResolver(): | |
| from os import path | |
| search = 'if order_matters' | |
| insert = 'for i in relevant: i.priority = i._get_priority()' | |
| file = translatePath('special://home/addons/script.module.resolveurl/lib/resolveurl/__init__.py') | |
| ln = 0 | |
| column = 0 | |
| if path.isfile(file): | |
| isEdit = False | |
| with open(file) as f: | |
| for lineno, line in enumerate(f): | |
| if search in line: | |
| # print("{} {}".format(lineno + 1, line.find(search) + 1)) | |
| ln = lineno | |
| column = line.find(search) | |
| elif insert in line: | |
| isEdit = True | |
| break | |
| if not isEdit: | |
| with open(file, 'r+') as f: | |
| lines = f.readlines() | |
| lines[ln+2] = lines[ln][0:column] + insert + '\n\n'# + lines[ln][column:] | |
| # rewind and rewrite the file in place | |
| f.seek(0) | |
| for i in lines: | |
| f.write(i) | |
| f.truncate()  # drop leftover bytes in case the rewritten content is shorter | |