From 1b71f7a8ded6c0bdce0cb57f57acd3af499aefc8 Mon Sep 17 00:00:00 2001
From: coder-alpha
Date: Sat, 18 Nov 2017 08:57:18 -0500
Subject: [PATCH] Anime support, Force transcoding option, etc.

New:
- Anime support with all features
- 9anime Provider
- Added version and update date info to Providers
- Force Transcoding (under Device Options)
- No Extra Page Info (Anime) (under Device Options)
- Google video check fallback now uses the requests library for more accurate detection of video presence

Changes:
- Sorted Device Options alphabetically
- Removed 'audio_channels' info from ServiceCode

A dedicated Anime section is available under the Channel:
- What's Hot
- Movies & TV-Series
- via Search
- via External Listing > Type > Anime

Providers with Anime content:
- GoGoAnime
- 9anime
- ALL-UC
- FMovies (limited)
- FMovies.io (limited)
- YesMovies (limited)
---
 Contents/Code/__init__.py | 1086 ++++++++++++-----
 Contents/Code/common.py | 48 +-
 Contents/Code/fmovies.py | 219 ++--
 Contents/Code/tools.py | 14 +-
 Contents/DefaultPrefs.json | 2 +-
 .../Shared/resources/lib/libraries/client.py | 39 +-
 .../Shared/resources/lib/libraries/control.py | 6 +-
 .../Shared/resources/lib/proxies/__init__.py | 4 +-
 .../Shared/resources/lib/proxies/xperienc.py | 6 +-
 .../resources/lib/resolvers/__init__.py | 2 +
 .../resources/lib/resolvers/host_gvideo.py | 65 +-
 .../resources/lib/resolvers/host_mega.py | 6 +-
 .../resources/lib/resolvers/host_openload.py | 8 +-
 .../resources/lib/resolvers/host_youtube.py | 8 +-
 .../resources/lib/sources/alluc_mv_tv.py | 51 +-
 .../Shared/resources/lib/sources/cyro_ca.py | 6 +-
 .../resources/lib/sources/fmovies_ca.py | 6 +-
 .../resources/lib/sources/fmovies_mv_tv.py | 8 +-
 .../Shared/resources/lib/sources/gogoanime.py | 57 +-
 .../resources/lib/sources/nineanime_ca.py | 650 ++++++++++
 .../resources/lib/sources/primewire_mv_tv.py | 6 +-
 .../resources/lib/sources/yesmovies_mv_tv.py | 6 +-
 Contents/Services/Shared Code/misc.pys | 136 ++-
 Contents/Services/URL/FMovies/ServiceCode.pys | 48 +-
 24 files changed, 1893 insertions(+), 594 deletions(-)
 create mode 100644 Contents/Libraries/Shared/resources/lib/sources/nineanime_ca.py

diff --git a/Contents/Code/__init__.py b/Contents/Code/__init__.py
index eb034d9..fc80d06 100644
--- a/Contents/Code/__init__.py
+++ b/Contents/Code/__init__.py
@@ -9,6 +9,7 @@ import common, updater, fmovies, tools, download
 from DumbTools import DumbKeyboard
 import AuthTools
+from __builtin__ import eval
 
 TITLE = common.TITLE
 PREFIX = common.PREFIX
@@ -70,14 +71,14 @@
 
 MC = common.NewMessageContainer(common.PREFIX, common.TITLE)
 
-ES_API_URL = 'http://movies-v2.api-fetch.website'
-
 CHECK_AUTH = 'CheckAuth'
 
 ######################################################################################
 # Set global variables
 
-CAT_WHATS_HOT = ['Sizzlers','Most Favourited','Recommended','Most Watched This Week','Most Watched This Month','Latest Movies','Latest TV-Series','Requested Movies']
-CAT_REGULAR = ['Movies','TV-Series','Top-IMDb','Most Watched','Sitemap Listing']
+CAT_WHATS_HOT = []
+CAT_WHATS_HOT_REGULAR = ['Sizzlers','Most Favourited','Recommended','Most Watched This Week','Most Watched This Month','Latest Movies','Latest TV-Series','Requested Movies']
+CAT_WHATS_HOT_ANIME = ['Recently Updated (Anime)','Recently Updated Sub (Anime)','Recently Updated Dub (Anime)', 'Trending (Anime)', 'Recently Added (Anime)', 'Ongoing (Anime)', 'Requested (Anime)']
+CAT_REGULAR = ['Anime','Movies','TV-Series','Top-IMDb','Most Watched','Sitemap Listing']
 CAT_FILTERS = ['Release','Genre','Country','Filter 
Setup >>>'] CAT_GROUPS = ['What\'s Hot ?', 'Movies & TV-Series', 'Sort using...','Site News'] @@ -115,6 +116,11 @@ def Start(): common.CACHE_META.clear() HTTP.ClearCache() + for x in CAT_WHATS_HOT_REGULAR: + CAT_WHATS_HOT.append(x) + for x in CAT_WHATS_HOT_ANIME: + CAT_WHATS_HOT.append(x) + try: CACHE_EXPIRY = 60 * int(Prefs["cache_expiry_time"]) except: @@ -209,23 +215,17 @@ def SiteCookieRoutine(session=None, reset=False, dump=False, quiet=False, **kwar ###################################################################################### @route(PREFIX + "/PreCacheStuff") def PreCacheStuff(): - try: - url = (fmovies.BASE_URL) - #HTTP.PreCache(newurl) - page_data, error = common.GetPageAsString(url=url) - if Prefs["use_debug"]: - Log("Pre-Cached : %s" % url) - except Exception as e: - Log(e) - - try: - url = (fmovies.BASE_URL + fmovies.SITE_MAP) - #HTTP.PreCache(newurl) - page_data, error = common.GetPageAsString(url=url) - if Prefs["use_debug"]: - Log("Pre-Cached : %s" % url) - except Exception as e: - Log(e) + + PRE_CACHE_URLS = [fmovies.BASE_URL, fmovies.BASE_URL + '/home', fmovies.BASE_URL + fmovies.SITE_MAP, common.ANIME_URL] + + for url in PRE_CACHE_URLS: + try: + Thread.Create(common.GetPageAsString, {}, url) + #HTTP.PreCache(newurl) + if Prefs["use_debug"]: + Log("Pre-Caching : %s" % url) + except Exception as e: + Log(e) ###################################################################################### @route(PREFIX + "/SleepPersistAndUpdateCookie") @@ -440,7 +440,7 @@ def DeviceOptions(session, **kwargs): oc = ObjectContainer(title2='Device Options', no_cache=common.isForceNoCache()) c = 1 - for key in common.DEVICE_OPTIONS: + for key in sorted(common.DEVICE_OPTIONS): summary = common.DEVICE_OPTION[key] bool = False if (Dict['Toggle'+key+session] == None or Dict['Toggle'+key+session] == 'disabled') else True title_msg = "%02d). 
%s %s | %s" % (c, common.GetEmoji(type=bool, mode='simple', session=session), key, summary) @@ -888,7 +888,7 @@ def ExtProviders(session, curr_provs=None, refresh=False, item=None, setbool='Tr ARRAY_T = [] for prov in scanned_extProviders: - prov['enabled'] = 'True' + prov['enabled'] = True ARRAY_T.append(prov) if curr_provs == None: @@ -898,6 +898,7 @@ def ExtProviders(session, curr_provs=None, refresh=False, item=None, setbool='Tr curr_provs = JSON.ObjectFromString(D(curr_provs)) del common.OPTIONS_PROVIDERS[:] + common.OPTIONS_PROVIDERS = [] if curr_provs != None and len(curr_provs) > 0: p_urls = [] @@ -1609,11 +1610,20 @@ def ShowMenu(title, session=None, **kwargs): @route(PREFIX + "/sortMenu") def SortMenu(title, session=None, **kwargs): - url = fmovies.BASE_URL + '/fmovies' oc = ObjectContainer(title2 = title, no_cache=common.isForceNoCache()) + is9anime = 'False' # Test for the site url initially to report a logical error + if title in CAT_WHATS_HOT_REGULAR: + url = fmovies.BASE_URL + '/home' + elif title in CAT_WHATS_HOT_ANIME: + url = common.ANIME_URL + is9anime = 'True' + else: + url = fmovies.BASE_URL + '/home' + page_data, error = common.GetPageElements(url = url) + if page_data == None: bool, noc, page_data = testSite(url=url) if bool == False: @@ -1689,23 +1699,59 @@ def SortMenu(title, session=None, **kwargs): elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='widget latest-series']//div[@class='item']") elif title == CAT_WHATS_HOT[7]: elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='widget requested']//div[@class='item']") + elif title == CAT_WHATS_HOT[8]: # Anime section starts here + elems = page_data.xpath(".//div[@data-name='updated']//*//div[@class='item']") + elif title == CAT_WHATS_HOT[9]: + elems = page_data.xpath(".//div[@data-name='updated-sub']//*//div[@class='item']") + elif title == CAT_WHATS_HOT[10]: + elems = page_data.xpath(".//div[@data-name='updated-dub']//*//div[@class='item']") + elif title == CAT_WHATS_HOT[11]: + elems = page_data.xpath(".//div[@data-name='top-week']//*//div[@class='item']") + elif title == CAT_WHATS_HOT[12]: + elems = page_data.xpath(".//div[@class='widget']//div[@class='list-film']//*//div[@class='item']") + elif title == CAT_WHATS_HOT[13]: + elems = page_data.xpath(".//div[@class='widget list-link']//div[@data-name='ongoing']//div[@class='item']") + elif title == CAT_WHATS_HOT[14]: + elems = page_data.xpath(".//div[@class='widget list-link']//div[@data-name='requested']//div[@class='item']") for elem in elems: - name = elem.xpath(".//a[@class='name']//text()")[0] - loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] - thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] - thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] - summary = 'Plot Summary on Item Page.' - eps_nos = '' title_eps_no = '' - try: - eps_nos = elem.xpath(".//div[@class='status']//span//text()")[0] - eps_no_i = str(int(eps_nos.strip())) - title_eps_no = ' (Eps:'+eps_no_i+')' - eps_nos = ' Episodes: ' + eps_no_i - except: - pass + + if is9anime == 'False': + name = elem.xpath(".//a[@class='name']//text()")[0] + loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] + thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] + thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + summary = 'Plot Summary on Item Page.' 
+ + try: + eps_nos = elem.xpath(".//div[@class='status']//span//text()")[0] + eps_no_i = str(int(eps_nos.strip())) + title_eps_no = ' (Eps:'+eps_no_i+')' + eps_nos = ' Episodes: ' + eps_no_i + except: + pass + else: + if title in [CAT_WHATS_HOT[13],CAT_WHATS_HOT[14]]: + name = elem.xpath(".//a//text()")[0] + loc = elem.xpath(".//a//@href")[0] + thumb_t = elem.xpath(".//img//@src")[0] + thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + summary = 'Plot Summary on Item Page.' + else: + name = elem.xpath(".//a[@class='name']//text()")[0] + loc = elem.xpath(".//a[@class='name']//@href")[0] + thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] + thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + summary = 'Plot Summary on Item Page.' + + try: + eps_nosx = elem.xpath(".//div[@class='status']//text()")[0].strip() + title_eps_no = ' (Eps:'+eps_nosx+')' + eps_nos = ' Episodes: ' + eps_nosx + except: + pass try: more_info_link = elem.xpath(".//@data-tip")[0] @@ -1713,9 +1759,9 @@ def SortMenu(title, session=None, **kwargs): more_info_link = None oc.add(DirectoryObject( - key = Callback(EpisodeDetail, title = name, url = loc, thumb = thumb, session = session), + key = Callback(EpisodeDetail, title = name, url = loc, thumb = thumb, session = session, is9anime=is9anime), title = name + title_eps_no, - summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=url, session=session) + eps_nos, + summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=url, session=session, is9anime=is9anime) + eps_nos, thumb = Resource.ContentsOfURLWithFallback(url = thumb, fallback=ICON_UNAV) ) ) @@ -1735,6 +1781,7 @@ def SortMenu(title, session=None, **kwargs): @route(PREFIX + "/showcategory") def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **kwargs): + is9anime = 'False' if urlpath != None: newurl = urlpath + '?page=%s' % page_count else: @@ -1745,14 +1792,17 @@ def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **k elif title == CAT_FILTERS[2]: newurl = (fmovies.BASE_URL + '/country/' + key.lower() + '?page=%s' % page_count) elif title == CAT_REGULAR[0]: - newurl = (fmovies.BASE_URL + '/movies' + '?page=%s' % page_count) + newurl = (common.ANIME_URL + '/newest' + '?page=%s' % page_count) + is9anime = 'True' elif title == CAT_REGULAR[1]: - newurl = (fmovies.BASE_URL + '/tv-series' + '?page=%s' % page_count) + newurl = (fmovies.BASE_URL + '/movies' + '?page=%s' % page_count) elif title == CAT_REGULAR[2]: - newurl = (fmovies.BASE_URL + '/top-imdb' + '?page=%s' % page_count) + newurl = (fmovies.BASE_URL + '/tv-series' + '?page=%s' % page_count) elif title == CAT_REGULAR[3]: - newurl = (fmovies.BASE_URL + '/most-watched' + '?page=%s' % page_count) + newurl = (fmovies.BASE_URL + '/top-imdb' + '?page=%s' % page_count) elif title == CAT_REGULAR[4]: + newurl = (fmovies.BASE_URL + '/most-watched' + '?page=%s' % page_count) + elif title == CAT_REGULAR[5]: newurl = (fmovies.BASE_URL + fmovies.SITE_MAP) page_data, error = common.GetPageElements(url=newurl) @@ -1768,7 +1818,7 @@ def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **k return MC.message_container(title, error) elems = [] - if title == CAT_REGULAR[4]: + if title == CAT_REGULAR[5]: if len(fmovies.SITE_MAP_HTML_ELEMS) == 0: elems_all = page_data.xpath(".//*[@id='body-wrapper']/div/div/div[2]/ul/li[9]/ul/li") fmovies.SITE_MAP_HTML_ELEMS = elems_all @@ -1779,6 +1829,13 @@ def ShowCategory(title, key=' ', urlpath=None, 
page_count='1', session=None, **k limit_y = int(page_count) * 50 for i in range(limit_x, limit_y): elems.append(elems_all[i]) + elif title == CAT_REGULAR[0]: # Anime + elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row']//div[@class='item']") + last_page_no = int(page_count) + try: + last_page_no = int(page_data.xpath(".//*[@id='body-wrapper']//span[@class='total']//text()")[0]) + except: + pass else: elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']") last_page_no = int(page_count) @@ -1805,7 +1862,10 @@ def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **k more_info_link = None else: name = elem.xpath(".//a[@class='name']//text()")[0] - loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] + if title == CAT_REGULAR[0]: + loc = elem.xpath(".//a[@class='name']//@href")[0] + else: + loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] summary = 'Plot Summary on Item Page.' @@ -1827,7 +1887,7 @@ def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **k oc.add(DirectoryObject( key = Callback(EpisodeDetail, title = name, url = loc, thumb = thumb, session = session), title = name + title_eps_no, - summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=newurl, session=session) + eps_nos, + summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=newurl, session=session, is9anime=is9anime) + eps_nos, thumb = Resource.ContentsOfURLWithFallback(url = thumb, fallback=ICON_UNAV) ) ) @@ -1862,7 +1922,7 @@ def ShowCategory(title, key=' ', urlpath=None, page_count='1', session=None, **k ###################################################################################### @route(PREFIX + "/episodedetail") -def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs): +def EpisodeDetail(title, url, thumb, session, dataEXS=None, dataEXSAnim=None, **kwargs): page_data, error = common.GetPageElements(url=url) if error != '': @@ -1870,225 +1930,450 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs): if page_data == None: return MC.message_container("Unknown Error", "Error: The page was not received. Please try again.") + is9anime = 'True' if common.ANIME_KEY in url.lower() else 'False' + client_id = '%s-%s' % (Client.Product, session) if client_id not in CUSTOM_TIMEOUT_DICT.keys(): CUSTOM_TIMEOUT_DICT[client_id] = {} - try: - title = unicode(page_data.xpath(".//*[@id='info']//h1[@class='name']//text()")[0]) - except: - title = unicode(title) - - try: - item_unav = '' - errs = page_data.xpath(".//*[@id='movie']//div[@class='alert alert-primary notice'][2]//text()") - for err in errs: - if 'There is no server for this movie right now, please try again later.' 
in err: - item_unav = ' ' + common.GetEmoji(type='neg', session=session) - break - except: - pass + if dataEXS==None and dataEXSAnim==None and is9anime == 'False': + if Prefs["use_debug"]: + Log("============================= Processing bmovies ===============================") - try: - if thumb == None: - thumb_t = page_data.xpath(".//*[@id='info']//div//img")[0] - thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] - except: + try: + title = unicode(page_data.xpath(".//*[@id='info']//h1[@class='name']//text()")[0]) + except: + title = unicode(title) + + try: + item_unav = '' + errs = page_data.xpath(".//*[@id='movie']//div[@class='alert alert-primary notice'][2]//text()") + for err in errs: + if 'There is no server for this movie right now, please try again later.' in err: + item_unav = ' ' + common.GetEmoji(type='neg', session=session) + break + except: + pass + try: if thumb == None: - thumb_t = page_data.xpath(".//*[@id='info']//div//img//@src")[0] + thumb_t = page_data.xpath(".//*[@id='info']//div//img")[0] thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] except: - thumb = R(ICON_UNAV) + try: + if thumb == None: + thumb_t = page_data.xpath(".//*[@id='info']//div//img//@src")[0] + thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + except: + thumb = R(ICON_UNAV) + + try: + serverts = page_data.xpath(".//body[@class='watching']//@data-ts")[0] + except: + serverts = 0 - try: - serverts = page_data.xpath(".//body[@class='watching']//@data-ts")[0] - except: - serverts = 0 - - try: - art = page_data.xpath(".//meta[@property='og:image'][1]//@content")[0] - except: - art = 'https://cdn.rawgit.com/coder-alpha/FMoviesPlus.bundle/master/Contents/Resources/art-default.jpg' - oc = ObjectContainer(title2 = title + item_unav, art = art, no_cache=common.isForceNoCache()) - - try: - summary = page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//div[@class='desc']//text()")[0] - #summary = re.sub(r'[^0-9a-zA-Z \-/.,\':+&!()]', '', summary) - except: - summary = 'Summary Not Available.' - - try: - trailer = page_data.xpath(".//*[@id='control']//div['item mbtb watch-trailer hidden-xs']//@data-url")[0] - except: - trailer = None - - try: - year = str(page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][2]//dd[2]//text()")[0][0:4]) - except: - year = 'Not Available' + try: + art = page_data.xpath(".//meta[@property='og:image'][1]//@content")[0] + except: + art = 'https://cdn.rawgit.com/coder-alpha/FMoviesPlus.bundle/master/Contents/Resources/art-default.jpg' + oc = ObjectContainer(title2 = title + item_unav, art = art, no_cache=common.isForceNoCache()) - try: - rating = str(page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//span[1]//b//text()")[0]) - except: - rating = 'Not Available' + try: + summary = page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//div[@class='desc']//text()")[0] + #summary = re.sub(r'[^0-9a-zA-Z \-/.,\':+&!()]', '', summary) + except: + summary = 'Summary Not Available.' 
- try: - duration = int(page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//span[2]//b//text()")[0].strip('/episode').strip(' min')) - except: - duration = 'Not Available' + try: + trailer = page_data.xpath(".//*[@id='control']//div['item mbtb watch-trailer hidden-xs']//@data-url")[0] + except: + trailer = None + + try: + year = str(page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][2]//dd[2]//text()")[0][0:4]) + except: + year = 'Not Available' + + try: + rating = str(page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//span[1]//b//text()")[0]) + except: + rating = 'Not Available' + + try: + duration = int(page_data.xpath(".//*[@id='info']//div[@class='info col-md-19']//span[2]//b//text()")[0].strip('/episode').strip(' min')) + except: + duration = 'Not Available' - try: - genre0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[1]//a//text()") - genre = (','.join(str(x) for x in genre0)) - if genre == '': + try: + genre0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[1]//a//text()") + genre = (','.join(str(x) for x in genre0)) + if genre == '': + genre = 'Not Available' + except: genre = 'Not Available' - except: - genre = 'Not Available' - - try: - directors0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[3]//text()") - directors = (','.join(common.removeAccents(x) for x in directors0)) - if directors.strip() == '...': + + try: + directors0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[3]//text()") + directors = (','.join(common.removeAccents(x) for x in directors0)) + if directors.strip() == '...': + directors = 'Not Available' + except: directors = 'Not Available' - except: - directors = 'Not Available' - - try: - roles0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[2]//a//text()") - roles = (','.join(common.removeAccents(x) for x in roles0)) - if roles == '': + + try: + roles0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[2]//a//text()") + roles = (','.join(common.removeAccents(x) for x in roles0)) + if roles == '': + roles = 'Not Available' + except: roles = 'Not Available' - except: - roles = 'Not Available' - - try: - servers = page_data.xpath(".//*[@id='servers']//div[@class='server row']") - except: - servers = [] - - summary += '\n ' - summary += 'Actors: ' + (roles) + '\n ' - summary += 'Directors: ' + (directors) + '\n ' - - if str(duration) == 'Not Available': - summary += 'Runtime: ' + (str(duration)) + '\n ' - duration = 0 - else: - summary += 'Runtime: ' + (str(duration)) + ' min.' + '\n ' - - summary += 'Year: ' + (year) + '\n ' - summary += 'Genre: ' + (genre) + '\n ' - summary += 'IMDB rating: ' + (rating) + '\n ' + + try: + servers = page_data.xpath(".//*[@id='servers']//div[@class='server row']") + except: + servers = [] + + summary += '\n ' + summary += 'Actors: ' + (roles) + '\n ' + summary += 'Directors: ' + (directors) + '\n ' + + if str(duration) == 'Not Available': + summary += 'Runtime: ' + (str(duration)) + '\n ' + duration = 0 + else: + summary += 'Runtime: ' + (str(duration)) + ' min.' 
+ '\n ' + + summary += 'Year: ' + (year) + '\n ' + summary += 'Genre: ' + (genre) + '\n ' + summary += 'IMDB rating: ' + (rating) + '\n ' - try: - summary = unicode(common.ascii_only(summary)) - #summary = unicode(str(summary).replace('"','').replace('\u00','')) - except: - summary = 'Not Available' + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' + + try: + similar_reccos = [] + similar_reccos_elems = page_data.xpath(".//*[@id='movie']//div[@class='row movie-list']//div[@class='item']") + + for elem in similar_reccos_elems: + similar_reccos_name = elem.xpath(".//a[@class='name']//text()")[0] + similar_reccos_loc = elem.xpath(".//a[@class='name']//@href")[0] + thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] + similar_reccos_thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + try: + eps_nos = elem.xpath(".//div[@class='status']//span//text()")[0] + eps_nos = ' Episodes: ' + str(int(eps_nos.strip())) + except: + eps_nos = '' + try: + similar_reccos_more_info_link = elem.xpath(".//@data-tip")[0] + except: + similar_reccos_more_info_link = None + similar_reccos.append({'name':similar_reccos_name, 'loc':similar_reccos_loc, 'thumb':similar_reccos_thumb, 'more_info_link':similar_reccos_more_info_link, 'eps_nos':eps_nos}) + except: + similar_reccos = [] + + try: + tags0 = page_data.xpath(".//*[@id='tags']//a//text()") + tags = (','.join(str(x) for x in tags0)) + if tags == '': + tags = 'Not Available' + except: + tags = 'Not Available' - try: - similar_reccos = [] - similar_reccos_elems = page_data.xpath(".//*[@id='movie']//div[@class='row movie-list']//div[@class='item']") + episodes = [] + try: + episodes = page_data.xpath(".//*[@id='movie']//div[@class='widget boxed episode-summary']//div[@class='item']") + except: + pass + + servers_list = {} + episodes_list = [] + server_lab = [] + isTvSeries = False + isMovieWithMultiPart = False + + try: + item_type = page_data.xpath(".//div[@id='movie']/@data-type")[0] + if item_type == 'series': + isTvSeries = True + except: + pass + + for server in servers: + label = server.xpath(".//label[@class='name col-md-4 col-sm-5']//text()[2]")[0].strip() + if label in common.host_gvideo.FMOVIES_SERVER_MAP: + label = common.host_gvideo.FMOVIES_SERVER_MAP[label] + if 'Server F' in label: + label = label.replace('Server F','Google-F') + if 'Server G' in label: + label = label.replace('Server G','Google-G') + + server_lab.append(label) + items = server.xpath(".//ul//li") + if len(items) > 1: + isMovieWithMultiPart = True + + servers_list[label] = [] + c=0 + for item in items: + servers_list[label].append([]) + servers_list[label][c]={} + label_qual = item.xpath(".//a//text()")[0].strip() + label_val = item.xpath(".//a//@data-id")[0] + servers_list[label][c]['quality'] = label_qual + servers_list[label][c]['loc'] = label_val + c += 1 - for elem in similar_reccos_elems: - similar_reccos_name = elem.xpath(".//a[@class='name']//text()")[0] - similar_reccos_loc = elem.xpath(".//a[@class='name']//@href")[0] - thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] - similar_reccos_thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] + # label array of servers available - sort them so that presentation order is consistent + server_lab = sorted(server_lab) + + # remap server list - this way its easier to iterate for tv-show episodes + servers_list_new = [] + c=0 + + if len(servers_list) > 0: + for k in servers_list: + 
break
+			for no in servers_list[k]:
+				servers_list_new.append([])
+				servers_list_new[c] = {}
+				for label in servers_list:
+					servers_list_new[c][label] = {}
+					try:
+						servers_list_new[c][label] = {'quality':servers_list[label][c]['quality'], 'loc':servers_list[label][c]['loc']}
+					except:
+						if c > 99:
+							servers_list_new[c][label] = {'quality':"%03d" % (c+1), 'loc':''}
+						else:
+							servers_list_new[c][label] = {'quality':"%02d" % (c+1), 'loc':''}
+				c += 1
+
+		############################# Data ############################
+		episodes_XS = []
+		imdb_id = None
+
+	if dataEXSAnim != None or is9anime == 'True':
+		if Prefs["use_debug"]:
+			Log("============================= Processing 9anime ===============================")
+		try:
+			title = page_data.xpath("//*[@id='movie']//h1[@class='title']//text()")[0]
+		except:
+			pass
+
+		title = unicode(common.ascii_only(title))
+
+		try:
+			item_unav = ''
+			errs = page_data.xpath(".//*[@id='movie']//div[@class='alert alert-primary notice'][2]//text()")
+			for err in errs:
+				if 'There is no server for this movie right now, please try again later.' in err:
+					item_unav = ' ' + common.GetEmoji(type='neg', session=session)
+					break
+		except:
+			pass
+
+		isTvSeries = False
+		try:
+			item_type = page_data.xpath(".//div[@id='movie']/@data-type")[0]
+			if item_type == 'series' or item_type == 'ova':
+				isTvSeries = True
+		except:
+			pass
+
+
+		try:
+			if thumb == None:
+				thumb_t = page_data.xpath(".//*[@id='info']//div//img")[0]
+				thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1]
+		except:
 			try:
-				eps_nos = elem.xpath(".//div[@class='status']//span//text()")[0]
-				eps_nos = ' Episodes: ' + str(int(eps_nos.strip()))
+				if thumb == None:
+					thumb_t = page_data.xpath(".//*[@id='info']//div//img//@src")[0]
+					thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1]
 			except:
-				eps_nos = ''
+				thumb = R(ICON_UNAV)
+
+		try:
+			serverts = page_data.xpath(".//body//@data-ts")[0]
+		except:
+			serverts = 0
+
+		try:
+			art = page_data.xpath(".//meta[@property='og:image'][1]//@content")[0]
+		except:
+			art = 'https://cdn.rawgit.com/coder-alpha/FMoviesPlus.bundle/master/Contents/Resources/art-default.jpg'
+		oc = ObjectContainer(title2 = title + item_unav, art = art, no_cache=common.isForceNoCache())
+
+		try:
+			summary = page_data.xpath(".//*[@id='info']//div[@class='desc']//text()")
+			summary = summary[0]
+		except:
 			try:
-				similar_reccos_more_info_link = elem.xpath(".//@data-tip")[0]
+				summary = page_data.xpath(".//div[@class='shortcontent']/text()")
+				summary = summary[0]
 			except:
-				similar_reccos_more_info_link = None
-		similar_reccos.append({'name':similar_reccos_name, 'loc':similar_reccos_loc, 'thumb':similar_reccos_thumb, 'more_info_link':similar_reccos_more_info_link, 'eps_nos':eps_nos})
-	except:
-		similar_reccos = []
+				summary = 'Summary Not Available.' 
- try: - tags0 = page_data.xpath(".//*[@id='tags']//a//text()") - tags = (','.join(str(x) for x in tags0)) - if tags == '': - tags = 'Not Available' - except: - tags = 'Not Available' - - episodes = [] - try: - episodes = page_data.xpath(".//*[@id='movie']//div[@class='widget boxed episode-summary']//div[@class='item']") - except: - pass + try: + trailer = page_data.xpath(".//*[@id='control']//div['item mbtb watch-trailer hidden-xs']//@data-url")[0] + except: + trailer = None - servers_list = {} - episodes_list = [] - server_lab = [] - isTvSeries = False - isMovieWithMultiPart = False - - try: - item_type = page_data.xpath(".//div[@id='movie']/@data-type")[0] - if item_type == 'series': - isTvSeries = True - except: - pass + try: + try: + year = str(page_data.xpath(".//dt[contains(text(),'Date')]//following-sibling::dd[1]//text()")[0].strip()[-4:]) + year = str(int(year)) + except: + year = str(page_data.xpath(".//dt[contains(text(),'Premiered')]//following-sibling::dd[1]//text()")[0].strip()[-4:]) + year = str(int(year)) + except: + year = 'Not Available' + + try: + rating = str(page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][2]//dd[1]//text()")[0].split('/')[0]) + except: + rating = 'Not Available' + + try: + if isTvSeries == True: + duration = int(page_data.xpath(".//dd[contains(text(),'min')]//text()")[0].strip('/episode').strip(' min')) + else: + duration = int(eval(page_data.xpath(".//dd[contains(text(),'min') or contains(text(),'hr')]/text()")[0].replace('hr.','*60+').strip(' min'))) + except: + duration = 'Not Available' + + try: + genre0 = page_data.xpath(".//*[@id='info']//dl[@class='meta col-sm-12'][1]//dd[3]//a//text()") + genre = (','.join(str(x) for x in genre0)) + if genre == '': + genre = 'Not Available' + except: + genre = 'Not Available' - for server in servers: - label = server.xpath(".//label[@class='name col-md-4 col-sm-5']//text()[2]")[0].strip() - if label in common.host_gvideo.FMOVIES_SERVER_MAP: - label = common.host_gvideo.FMOVIES_SERVER_MAP[label] - if 'Server F' in label: - label = label.replace('Server F','Google-F') - if 'Server G' in label: - label = label.replace('Server G','Google-G') - - server_lab.append(label) - items = server.xpath(".//ul//li") - if len(items) > 1: - isMovieWithMultiPart = True - - servers_list[label] = [] - c=0 - for item in items: - servers_list[label].append([]) - servers_list[label][c]={} - label_qual = item.xpath(".//a//text()")[0].strip() - label_val = item.xpath(".//a//@data-id")[0] - servers_list[label][c]['quality'] = label_qual - servers_list[label][c]['loc'] = label_val - c += 1 + directors = 'Not Available' + roles = 'Not Available' + + try: + servers = page_data.xpath(".//*[@id='servers']//div[@class='server row']") + except: + servers = [] + + summary += '\n ' + summary += 'Actors: ' + (roles) + '\n ' + summary += 'Directors: ' + (directors) + '\n ' + + if str(duration) == 'Not Available': + summary += 'Runtime: ' + (str(duration)) + '\n ' + duration = 0 + else: + summary += 'Runtime: ' + (str(duration)) + ' min.' 
+ '\n ' + + summary += 'Year: ' + (year) + '\n ' + summary += 'Genre: ' + (genre) + '\n ' + summary += 'Rating: ' + (rating) + '\n ' - # label array of servers available - sort them so that presentation order is consistent - server_lab = sorted(server_lab) - - # remap server list - this way its easier to iterate for tv-show episodes - servers_list_new = [] - c=0 - - if len(servers_list) > 0: - for k in servers_list: - break - for no in servers_list[k]: - servers_list_new.append([]) - servers_list_new[c] = {} - for label in servers_list: - servers_list_new[c][label] = {} + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' + + try: + similar_reccos = [] + similar_reccos_elems = page_data.xpath(".//*[@id='movie']//div[@class='row']//div[@class='item']") + + for elem in similar_reccos_elems: + similar_reccos_name = elem.xpath(".//a[@class='name']//text()")[0] + similar_reccos_loc = elem.xpath(".//a[@class='name']//@href")[0] + thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] + similar_reccos_thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] try: - servers_list_new[c][label] = {'quality':servers_list[label][c]['quality'], 'loc':servers_list[label][c]['loc']} + eps_nos = elem.xpath(".//div[@class='status']//span//text()")[0] + eps_nos = ' Episodes: ' + str(int(eps_nos.strip())) except: - if c > 99: - servers_list_new[c][label] = {'quality':"%03d" % (c+1), 'loc':''} - else: - servers_list_new[c][label] = {'quality':"%02d" % (c+1), 'loc':''} - c += 1 + eps_nos = '' + try: + similar_reccos_more_info_link = elem.xpath(".//@data-tip")[0] + except: + similar_reccos_more_info_link = None + similar_reccos.append({'name':similar_reccos_name, 'loc':similar_reccos_loc, 'thumb':similar_reccos_thumb, 'more_info_link':similar_reccos_more_info_link, 'eps_nos':eps_nos}) + except: + similar_reccos = [] - ############################# Data ############################ - episodes_XS = [] - imdb_id = None - if dataEXS != None or ES_API_URL.lower() in url: + try: + tags0 = page_data.xpath(".//*[@id='tags']//a//text()") + tags = (','.join(str(x) for x in tags0)) + if tags == '': + tags = 'Not Available' + except: + tags = 'Not Available' + + episodes = [] + try: + episodes = page_data.xpath(".//*[@id='movie']//div[@class='widget boxed episode-summary']//div[@class='item']") + except: + pass + + servers_list = {} + episodes_list = [] + server_lab = [] + isMovieWithMultiPart = False + + for server in servers: + label = server.xpath(".//label[@class='name col-md-3 col-sm-4']//text()[2]")[0].strip() + if label in common.host_gvideo.FMOVIES_SERVER_MAP: + label = common.host_gvideo.FMOVIES_SERVER_MAP[label] + if 'Server F' in label: + label = label.replace('Server F','Google-F') + if 'Server G' in label: + label = label.replace('Server G','Google-G') + + server_lab.append(label) + items = server.xpath(".//ul//li") + if len(items) > 1: + isMovieWithMultiPart = True + + servers_list[label] = [] + c=0 + for item in items: + servers_list[label].append([]) + servers_list[label][c]={} + label_qual = item.xpath(".//a//text()")[0].strip() + label_val = item.xpath(".//a//@data-id")[0] + servers_list[label][c]['quality'] = label_qual + servers_list[label][c]['loc'] = label_val + c += 1 + + # label array of servers available - sort them so that presentation order is consistent + server_lab = sorted(server_lab) + + # remap server list - this way its easier to iterate for tv-show episodes + 
servers_list_new = []
+		c=0
+
+		if len(servers_list) > 0:
+			for k in servers_list:
+				break
+			for no in servers_list[k]:
+				servers_list_new.append([])
+				servers_list_new[c] = {}
+				for label in servers_list:
+					servers_list_new[c][label] = {}
+					try:
+						servers_list_new[c][label] = {'quality':servers_list[label][c]['quality'], 'loc':servers_list[label][c]['loc']}
+					except:
+						if c > 99:
+							servers_list_new[c][label] = {'quality':"%03d" % (c+1), 'loc':''}
+						else:
+							servers_list_new[c][label] = {'quality':"%02d" % (c+1), 'loc':''}
+				c += 1
+
+	if dataEXS != None or common.ES_API_URL.lower() in url:
+		if Prefs["use_debug"]:
+			Log("============================= Processing API-Fetch ===============================")
 
 		json_data = None
 		if dataEXS == None:
@@ -2143,7 +2428,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 				type = 'movie'
 				subtype = 'movie'
 
-			loc = ES_API_URL + '/%s/%s' % (type,id)
+			loc = common.ES_API_URL + '/%s/%s' % (type,id)
 
 			dataEXS_d = {}
 			dataEXS_d['title'] = name
@@ -2201,6 +2486,12 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 					genre = 'Not Available'
 			except:
 				genre = 'Not Available'
+
+			try:
+				summary = unicode(common.ascii_only(summary))
+				#summary = unicode(str(summary).replace('"','').replace('\u00',''))
+			except:
+				summary = 'Not Available'
 
 		# trailer
 		try:
@@ -2366,7 +2657,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 
 		if SeasonN > 0 or True: # enable for all - even if this might be a single season
 			oc.add(DirectoryObject(
-					key = Callback(Search, query = common.cleantitle.removeParanthesisAndSeason(title, SeasonN), session = session, mode='other seasons', thumb=thumb, summary=summary),
+					key = Callback(Search, query = common.cleantitle.removeParanthesisAndSeason(title, SeasonN), session = session, mode='other seasons', thumb=thumb, summary=summary, is9anime=is9anime),
 					title = "Other Seasons",
 					summary = 'Other Seasons of ' + common.cleantitle.removeParanthesis(title),
 					art = art,
@@ -2404,7 +2695,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 				pass
 		if SeasonN > 0 or True: # enable for all - even if this might be a single season
 			oc.add(DirectoryObject(
-					key = Callback(Search, query = common.cleantitle.removeParanthesisAndSeason(title, SeasonN), session = session, mode='other seasons', thumb=thumb, summary=summary),
+					key = Callback(Search, query = common.cleantitle.removeParanthesisAndSeason(title, SeasonN), session = session, mode='other seasons', thumb=thumb, summary=summary, is9anime=is9anime),
 					title = "Other Seasons",
 					summary = 'Other Seasons of ' + common.cleantitle.removeParanthesisAndSeason(title, SeasonN),
 					art = art,
@@ -2433,7 +2724,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 			try:
 				episode = eps[server_lab[0]]['quality']
 				title_s = episode
-				oc.add(VideoClipObject(
+				oc.add(DirectoryObject(
 						key = Callback(TvShowDetail, tvshow=title, title=title_s, url=url, servers_list_new=servers_list_new[c], server_lab=(','.join(str(x) for x in server_lab)), summary=summary, thumb=thumb, art=art, year=year, rating=rating, duration=duration, genre=genre, directors=directors, roles=roles, serverts=serverts, session=session, season=SeasonN, episode=episode, treatasmovie=True, imdb_id=imdb_id),
 						title = title_s,
 						summary = summary,
@@ -2553,7 +2844,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 
 			if not Prefs['use_openload_pairing'] and 'openload' in host and common.is_uss_installed() and 
URLService.ServiceIdentifierForURL(server_info) != None:
 				durl = server_info
 			else:
-				durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "roles":roles, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'','quality':qual, 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key']}))
+				durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "roles":roles, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'','quality':qual, 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key'], "force_transcode":common.UsingOption(key=common.DEVICE_OPTIONS[10], session=session)}))
 
 			vco = VideoClipObject(
 				url = durl,
@@ -2647,7 +2938,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 
 	if len(similar_reccos) > 0:
 		oc.add(DirectoryObject(
-				key = Callback(SimilarRecommendations, title = title, similar_reccos = E(JSON.StringFromObject(similar_reccos)), referer=url),
+				key = Callback(SimilarRecommendations, title = title, similar_reccos = E(JSON.StringFromObject(similar_reccos)), referer=url, is9anime = is9anime),
 				title = "Similar Recommendations",
 				summary = 'Discover other %s similar to %s' % (itemtype, title),
 				art = art,
@@ -2657,7 +2948,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 
 	if roles != 'Not Available':
 		oc.add(DirectoryObject(
-				key = Callback(MoviesWithPeople, stars = roles, session = session),
+				key = Callback(MoviesWithPeople, stars = roles, session = session, is9anime = is9anime),
 				title = "People Search",
 				summary = 'Search for movies/shows based on a person from the current %s' % (itemtype),
 				art = art,
@@ -2667,7 +2958,7 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 
 	if tags != 'Not Available':
 		oc.add(DirectoryObject(
-				key = Callback(MoviesWithTag, tags = tags, session = session),
+				key = Callback(MoviesWithTag, tags = tags, session = session, is9anime = is9anime),
 				title = "Tag Search",
 				summary = 'Search for movies/shows based on a Tag from the current %s' % (itemtype),
 				art = art,
@@ -2701,6 +2992,12 @@ def EpisodeDetail(title, url, thumb, session, dataEXS=None, **kwargs):
 @route(PREFIX + "/TvShowDetail")
 def TvShowDetail(tvshow, title, url, servers_list_new, server_lab, summary, thumb, art, year, rating, duration, genre, directors, roles, serverts, session, season=None, episode=None, treatasmovie=False, imdb_id=None, **kwargs):
 
+	try:
+		summary = unicode(common.ascii_only(summary))
+		#summary = unicode(str(summary).replace('"','').replace('\u00',''))
+	except:
+		summary = 'Not Available'
+
 	oc = ObjectContainer(title2 = title, art = art, no_cache=common.isForceNoCache())
 
 	servers_list_new = servers_list_new.replace("'", "\"")
@@ -2783,7 +3080,7 @@ def TvShowDetail(tvshow, title, url, servers_list_new, server_lab, summary, thum
 
 			if not Prefs['use_openload_pairing'] and 'openload' in host and common.is_uss_installed() and URLService.ServiceIdentifierForURL(server_info) 
!= None: durl = server_info else: - durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'', 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key']})) + durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'', 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key'], "force_transcode":common.UsingOption(key=common.DEVICE_OPTIONS[10], session=session)})) vco = None try: @@ -2875,6 +3172,12 @@ def TvShowDetail(tvshow, title, url, servers_list_new, server_lab, summary, thum @route(PREFIX + "/Videodetail") def VideoDetail(title, url, url_s, label_i_qual, label, serverts, thumb, summary, art, year, rating, duration, genre, directors, roles, libtype, session=None, watch_title=None, **kwargs): + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' + oc = ObjectContainer(title2=title) try: # url_s = label_i['loc'] @@ -2943,7 +3246,7 @@ def VideoDetail(title, url, url_s, label_i_qual, label, serverts, thumb, summary if not Prefs['use_openload_pairing'] and 'openload' in host and common.is_uss_installed() and URLService.ServiceIdentifierForURL(server_info) != None: durl = server_info else: - durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "roles":roles, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'','quality':qual, 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key']})) + durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":server_info_t, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "roles":roles, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'urldata':'','quality':qual, 'pairrequired':pair_required, "host":host, "openloadApiKey":Prefs['control_openload_api_key'], "force_transcode":common.UsingOption(key=common.DEVICE_OPTIONS[10], session=session)})) vco = None vco = VideoClipObject( @@ -3045,7 +3348,13 @@ def VideoDetail(title, url, url_s, label_i_qual, label, serverts, thumb, summary #################################################################################################### @route(PREFIX + "/ExtSources") -def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directors, roles, movtitle=None, year=None, tvshowtitle=None, season=None, 
episode=None, session=None, imdb_id=None, **kwargs): +def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directors, roles, movtitle=None, year=None, tvshowtitle=None, season=None, episode=None, session=None, imdb_id=None,refresh=0, **kwargs): + + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' tvshowcleaned = tvshowtitle if tvshowtitle != None: @@ -3079,7 +3388,7 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo if prog < 100: oc_conc = ObjectContainer(title2='External Sources - Progress %s%s' % (prog, '%'), no_history=True, no_cache=True) oc.append(DirectoryObject( - key = Callback(ExtSources, movtitle=movtitle, tvshowtitle=tvshowtitle, season=season, episode=episode, title=title, url=url, summary=summary, thumb=thumb, art=art, year=year, rating=rating, duration=duration, genre=genre, directors=directors, roles=roles, session=session), + key = Callback(ExtSources, movtitle=movtitle, tvshowtitle=tvshowtitle, season=season, episode=episode, title=title, url=url, summary=summary, thumb=thumb, art=art, year=year, rating=rating, duration=duration, genre=genre, directors=directors, roles=roles, session=session, refresh=int(refresh)+1), title = 'Refresh - %s%s Done' % (prog,'%'), summary = 'List sources by External Providers.', art = art, @@ -3109,6 +3418,7 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo return MC.message_container('External Sources', 'No External Sources Available for this video.') internal_extSources = extSourKey + extExtrasSources_urlservice = [] watch_title = movtitle if season != None and episode != None: @@ -3171,22 +3481,24 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo titleinfo = '' title_msg = "%s %s| %s | %s | %s | %s | %s" % (status, source['maininfo'], source['rip'], source['quality'], file_size, source['source']+':'+source['subdomain'] if source['source']=='gvideo' else source['source'], source['provider']) else: - titleinfo = source['titleinfo'] - title_msg = "%s %s| %s | %s | %s | %s | %s | %s" % (status, source['maininfo'], source['vidtype'], source['rip'], source['quality'], file_size, source['source'], source['provider']) + #titleinfo = source['titleinfo'] + #title_msg = "%s %s| %s | %s | %s | %s | %s | %s" % (status, source['maininfo'], source['vidtype'], source['rip'], source['quality'], file_size, source['source'], source['provider']) + #extExtrasSources_urlservice.append(source) + pass if common.DEV_DEBUG == True: Log("%s --- %s" % (title_msg, vidUrl)) Log('Playback: %s' % common.interface.getHostsPlaybackSupport(encode=False)[source['source']]) # all source links (not extras) that can be played via the code service - if vidUrl != None and source['enabled'] and source['allowsStreaming'] and source['misc']['player'] == 'iplayer' and common.interface.getHostsPlaybackSupport(encode=False)[source['source']]: + if vidUrl != None and source['vidtype'] in 'Movie/Show' and source['enabled'] and source['allowsStreaming'] and source['misc']['player'] == 'iplayer' and common.interface.getHostsPlaybackSupport(encode=False)[source['source']]: urldata = source['urldata'] params = source['params'] if not Prefs['use_openload_pairing'] and 'openload' in source['source'] and common.is_uss_installed() and URLService.ServiceIdentifierForURL(vidUrl) != None: durl = vidUrl else: - durl = "fmovies://" + E(JSON.StringFromObject({"url":url, 
"server":vidUrl, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'quality':source['quality'], 'urldata':urldata, 'params':params, 'pairrequired':pair_required, "host":source['source'], "openloadApiKey":Prefs['control_openload_api_key']})) + durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":vidUrl, "title":title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "directors":directors, "roles":roles, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'quality':source['quality'], 'urldata':urldata, 'params':params, 'pairrequired':pair_required, "host":source['source'], "openloadApiKey":Prefs['control_openload_api_key'], "force_transcode":common.UsingOption(key=common.DEVICE_OPTIONS[10], session=session)})) try: oc.append(VideoClipObject( url = durl, @@ -3236,7 +3548,6 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo external_extSources = common.FilterBasedOn(external_extSources, use_host=False, use_filesize=False) extSources_urlservice = [] - extExtrasSources_urlservice = [] for source in external_extSources: bool = True for i in common.INTERNAL_SOURCES: @@ -3267,13 +3578,13 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo if cx > 0: ext_summmary = ', '.join('%s (%s)' % (x['label'],'enabled' if str(x['enabled']).lower()=='true' else 'disabled') for x in common.INTERNAL_SOURCES_FILETYPE if 'Movie/Show' not in x['label']) - ocp = DirectoryObject(title = 'Extras (%s items)' % str(cx), key = Callback(PSExtSources, con_title='Extras (%s items)' % str(cx), extSources_play=E(JSON.StringFromObject(extExtrasSources_urlservice)), session=session, watch_title=watch_title, summary=summary, thumb=thumb, art=art, url=url, duration=duration, genre=genre), summary=ext_summmary,thumb=R(ICON_PLEX)) + ocp = DirectoryObject(title = 'Extras (%s items)' % str(cx), key = Callback(PSExtSources, con_title='Extras (%s items)' % str(cx), extSources_play=E(JSON.StringFromObject(extExtrasSources_urlservice)), session=session, watch_title=watch_title, year=year, summary=summary, thumb=thumb, art=art, url=url, duration=duration, rating=rating, genre=genre), summary=ext_summmary,thumb=R(ICON_PLEX)) if prog < 100: oc.insert(1,ocp) else: oc.insert(0,ocp) if c > 0: - ocp = DirectoryObject(title = 'External Sources (via Plex-Service) %s links' % str(c), key = Callback(PSExtSources, con_title='External Sources (via Plex-Service) %s links' % str(c), extSources_play=E(JSON.StringFromObject(extSources_urlservice)), session=session, watch_title=watch_title, summary=summary, thumb=thumb, art=art, url=url, duration=duration, genre=genre), summary='Playable via Plex services that are available and a Generic Player that tries its best to handle the rest.', thumb=R(ICON_PLEX)) + ocp = DirectoryObject(title = 'External Sources (via Plex-Service) %s links' % str(c), key = Callback(PSExtSources, con_title='External Sources (via Plex-Service) %s links' % str(c), extSources_play=E(JSON.StringFromObject(extSources_urlservice)), session=session, watch_title=watch_title, year=year, summary=summary, thumb=thumb, art=art, url=url, duration=duration, 
rating=rating, genre=genre), summary='Playable via Plex services that are available and a Generic Player that tries its best to handle the rest.', thumb=R(ICON_PLEX)) oc.append(ocp) if len(oc) == 0: @@ -3286,6 +3597,12 @@ def ExtSources(title, url, summary, thumb, art, rating, duration, genre, directo #################################################################################################### @route(PREFIX + "/ExtSourcesDownload") def ExtSourcesDownload(title, url, summary, thumb, art, rating, duration, genre, directors, roles, mode, movtitle=None, year=None, tvshowtitle=None, season=None, episode=None, session=None, imdb_id=None, refresh=0, **kwargs): + + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' tvshowcleaned = tvshowtitle if tvshowtitle != None: @@ -3425,14 +3742,79 @@ def ExtSourcesDownload(title, url, summary, thumb, art, rating, duration, genre, #################################################################################################### @route(PREFIX + "/PSExtSources") -def PSExtSources(extSources_play, con_title, session, watch_title, summary, thumb, art, url, duration, genre): +def PSExtSources(extSources_play, con_title, session, watch_title, year, summary, thumb, art, url, duration, rating, genre): oc = ObjectContainer(title2 = unicode(con_title), no_cache=common.isForceNoCache()) + try: + summary = unicode(common.ascii_only(summary)) + #summary = unicode(str(summary).replace('"','').replace('\u00','')) + except: + summary = 'Not Available' + generic_playback_links = [] - for source in JSON.ObjectFromString(D(extSources_play)): + + all_sources = JSON.ObjectFromString(D(extSources_play)) + + for source in all_sources: + status = common.GetEmoji(type=source['online'], session=session) vidUrl = source['url'] - - if vidUrl != None: + + isTargetPlay = True if source['source'] not in ['gvideo','mega'] else False + isVideoOnline = source['online'] + + redirector_stat = '' + redirector_enabled = 'false' + if common.UsingOption(key=common.DEVICE_OPTIONS[2], session=session) and isTargetPlay == False: + redirector_stat = '| (via Redirector)' + redirector_enabled = 'true' + + pair_required = False + if source['source'] == 'openload' and (Prefs['use_openload_pairing'] or not common.is_uss_installed()): + pair_required = source['misc']['pair'] + + try: + file_size = '%s GB' % str(round(float(source['fs'])/common.TO_GB, 3)) + except: + file_size = '? 
GB' + + if source['vidtype'] in 'Movie/Show': + if source['titleinfo'] != '': + titleinfo = ' | ' + source['titleinfo'] + else: + titleinfo = '' + title_msg = "%s %s| %s | %s | %s | %s | %s" % (status, source['maininfo'], source['rip'], source['quality'], file_size, source['source']+':'+source['subdomain'] if source['source']=='gvideo' else source['source'], source['provider']) + else: + titleinfo = source['titleinfo'] + title_msg = "%s %s| %s | %s | %s | %s | %s | %s" % (status, source['maininfo'], source['vidtype'], source['rip'], source['quality'], file_size, source['source'], source['provider']) + + if common.DEV_DEBUG == True: + Log("%s --- %s" % (title_msg, vidUrl)) + Log('Playback: %s' % common.interface.getHostsPlaybackSupport(encode=False)[source['source']]) + + # all source links (not extras) that can be played via the code service + if vidUrl != None and source['enabled'] and source['allowsStreaming'] and source['misc']['player'] == 'iplayer' and common.interface.getHostsPlaybackSupport(encode=False)[source['source']]: + urldata = source['urldata'] + params = source['params'] + + if not Prefs['use_openload_pairing'] and 'openload' in source['source'] and common.is_uss_installed() and URLService.ServiceIdentifierForURL(vidUrl) != None: + durl = vidUrl + else: + durl = "fmovies://" + E(JSON.StringFromObject({"url":url, "server":vidUrl, "title":watch_title, "summary":summary, "thumb":thumb, "art":art, "year":year, "rating":rating, "duration":str(duration), "genre":genre, "directors":None, "roles":None, "isTargetPlay":str(isTargetPlay), "useSSL":Prefs["use_https_alt"], "isVideoOnline":str(isVideoOnline), "useRedirector": redirector_enabled, 'quality':source['quality'], 'urldata':urldata, 'params':params, 'pairrequired':pair_required, "host":source['source'], "openloadApiKey":Prefs['control_openload_api_key'], "force_transcode":common.UsingOption(key=common.DEVICE_OPTIONS[10], session=session)})) + try: + oc.add(VideoClipObject( + url = durl, + title = title_msg + titleinfo + redirector_stat, + thumb = GetThumb(thumb, session=session), + art = art, + summary = summary, + key = AddRecentWatchList(title = watch_title, url=url, summary=summary, thumb=thumb) + ) + ) + except Exception as e: + if Prefs["use_debug"]: + Log('ERROR init.py>PSExtSources>VideoClipObject-1 %s' % (e.args)) + + elif vidUrl != None: status = common.GetEmoji(type=source['online'], session=session) if source['vidtype'] in 'Movie/Show': title_msg = "%s %s| %s | %s | %s | %s" % (status, source['maininfo'], source['rip'], source['quality'], source['source'], source['provider']) @@ -3451,7 +3833,7 @@ def PSExtSources(extSources_play, con_title, session, watch_title, summary, thum key = AddRecentWatchList(title = watch_title, url=url, summary=summary, thumb=thumb) ) ) - else: + elif source['enabled'] == True: generic_playback_links.append((title_msg + source['titleinfo'] + ' | (via Generic Playback)', summary, GetThumb(thumb, session=session), source['params'], duration, genre, vidUrl, source['quality'], watch_title)) except: try: @@ -3528,7 +3910,7 @@ def GetThumb(thumb, session=None, **kwargs): #################################################################################################### @route(PREFIX + "/SimilarRecommendations") -def SimilarRecommendations(title, similar_reccos, referer=None, session = None, **kwargs): +def SimilarRecommendations(title, similar_reccos, referer=None, is9anime = 'False', session = None, **kwargs): oc = ObjectContainer(title2 = 'Similar to ' + title, 
no_cache=common.isForceNoCache()) @@ -3536,16 +3918,22 @@ def SimilarRecommendations(title, similar_reccos, referer=None, session = None, for elem in similar_reccos: name = elem['name'] - loc = fmovies.BASE_URL + elem['loc'] + if is9anime == 'False': + loc = fmovies.BASE_URL + elem['loc'] + dataEXSAnim = None + else: + loc = elem['loc'] + dataEXSAnim = loc + thumb = elem['thumb'] eps_nos = elem['eps_nos'] summary = 'Plot Summary on Item Page.' more_info_link = elem['more_info_link'] oc.add(DirectoryObject( - key = Callback(EpisodeDetail, title = name, url = loc, thumb = thumb, session = session), + key = Callback(EpisodeDetail, title = name, url = loc, thumb = thumb, session = session, dataEXSAnim = dataEXSAnim), title = name, - summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=referer, session=session) + eps_nos, + summary = GetMovieInfo(summary=summary, urlPath=more_info_link, referer=referer, session=session, is9anime=is9anime) + eps_nos, thumb = Resource.ContentsOfURLWithFallback(url = thumb, fallback=ICON_UNAV) ) ) @@ -3561,7 +3949,7 @@ def SimilarRecommendations(title, similar_reccos, referer=None, session = None, #################################################################################################### @route(PREFIX + "/MoviesWithPeople") -def MoviesWithPeople(stars, session, **kwargs): +def MoviesWithPeople(stars, session, is9anime='False', **kwargs): oc = ObjectContainer(title2 = 'People Search', no_cache=common.isForceNoCache()) @@ -3570,8 +3958,13 @@ def MoviesWithPeople(stars, session, **kwargs): roles_s = sorted(roles_s) for role in roles_s: role = common.removeAccents(role) + if is9anime == 'False': + surl= fmovies.BASE_URL + fmovies.STAR_PATH + role.lower().replace(' ', '-') + else: + surl= common.ANIME_URL + fmovies.STAR_PATH + role.lower().replace(' ', '-') + oc.add(DirectoryObject( - key = Callback(Search, query = role, session = session, surl= fmovies.BASE_URL + fmovies.STAR_PATH + role.lower().replace(' ', '-'), mode = 'people'), + key = Callback(Search, query = role, session = session, surl= surl, mode = 'people', is9anime=is9anime), title = role + ' >>>', summary = 'Other movie/show starring ' + role, thumb = R(ICON_STAR) @@ -3589,7 +3982,7 @@ def MoviesWithPeople(stars, session, **kwargs): #################################################################################################### @route(PREFIX + "/MoviesWithTag") -def MoviesWithTag(tags, session, **kwargs): +def MoviesWithTag(tags, session, is9anime='False', **kwargs): oc = ObjectContainer(title2 = 'Tag Search', no_cache=common.isForceNoCache()) @@ -3598,13 +3991,18 @@ def MoviesWithTag(tags, session, **kwargs): tags_s = sorted(tags_s) for tag in tags_s: tag = re.sub(r'[^0-9a-zA-Z ]', '', tag) - oc.add(DirectoryObject( - key = Callback(Search, query = tag, session = session, surl= fmovies.BASE_URL + fmovies.KEYWORD_PATH + tag.lower().replace(' ', '-'), mode = 'tag'), - title = tag + ' >>>', - summary = 'Other movie/show with keyword ' + tag, - thumb = R(ICON_TAG) + if len(tag) > 0: + if is9anime == 'False': + surl= fmovies.BASE_URL + fmovies.KEYWORD_PATH + tag.lower().replace(' ', '-') + else: + surl= common.ANIME_URL + fmovies.KEYWORD_PATH + tag.lower().replace(' ', '-') + oc.add(DirectoryObject( + key = Callback(Search, query = tag, session = session, surl= surl, mode = 'tag', is9anime=is9anime), + title = tag + ' >>>', + summary = 'Other movie/show with keyword ' + tag, + thumb = R(ICON_TAG) + ) ) - ) oc.add(DirectoryObject( key = Callback(MainMenu), @@ -3617,10 +4015,12 @@ 
def MoviesWithTag(tags, session, **kwargs): #################################################################################################### @route(PREFIX + "/getmovieinfo") -def GetMovieInfo(summary, urlPath, referer=None, session=None, **kwargs): +def GetMovieInfo(summary, urlPath, referer=None, session=None, is9anime='False', **kwargs): - if common.NO_MOVIE_INFO == True or urlPath == None and (summary == None or summary == '') or Prefs['use_web_proxy'] or common.UsingOption(common.DEVICE_OPTIONS[8], session=session) == True: + if common.NO_MOVIE_INFO == True or urlPath == None and (summary == None or summary == '') or Prefs['use_web_proxy']: return 'Plot Summary on Item Page' + elif (is9anime == 'False' and common.UsingOption(common.DEVICE_OPTIONS[8], session=session) == True) or (is9anime == 'True' and common.UsingOption(common.DEVICE_OPTIONS[11], session=session) == True): + return 'Plot Summary on Item Page. Disabled via Device Options.' elif summary != None and Prefs["dont_fetch_more_info"]: return summary elif urlPath == None: @@ -3629,7 +4029,11 @@ def GetMovieInfo(summary, urlPath, referer=None, session=None, **kwargs): return 'Plot Summary on Item Page' try: - url = fmovies.BASE_URL + '/' + urlPath + if is9anime == 'False': + url = fmovies.BASE_URL + '/' + urlPath + else: + url = common.ANIME_URL + '/' + urlPath + page_data, error = common.GetPageElements(url=url, referer=referer) summary = '' @@ -3705,7 +4109,7 @@ def RecentWatchList(title, session=None, **kwargs): for each in Dict: longstring = str(Dict[each]) - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring.lower()) and 'RR44SS' in longstring: + if (('fmovies.' in longstring or 'bmovies.' in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'RR44SS' in longstring: longstringsplit = longstring.split('RR44SS') urls_list.append({'key': each, 'time': longstringsplit[4], 'val': longstring}) @@ -3732,8 +4136,10 @@ def RecentWatchList(title, session=None, **kwargs): url = url.replace('www.','') ES = '' - if ES_API_URL.lower() in longstring.lower(): - ES = '*' + if common.ES_API_URL.lower() in longstring.lower(): + ES = common.EMOJI_EXT + if common.ANIME_URL.lower() in longstring.lower(): + ES = common.EMOJI_ANIME if url.replace('fmovies.to',fmovies_base) in items_in_recent or c > NO_OF_ITEMS_IN_RECENT_LIST: items_to_del.append(each['key']) @@ -3785,7 +4191,7 @@ def ClearRecentWatchList(**kwargs): for each in Dict: try: longstring = Dict[each] - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring) and 'RR44SS' in longstring: + if (('fmovies.' in longstring or 'bmovies.' in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'RR44SS' in longstring: remove_list.append(each) except: continue @@ -3819,7 +4225,7 @@ def Bookmarks(title, session = None, **kwargs): for each in Dict: longstring = str(Dict[each]) - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring.lower()) and 'Key5Split' in longstring: + if (('fmovies.' in longstring or 'bmovies.' 
in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'Key5Split' in longstring: stitle = unicode(longstring.split('Key5Split')[0]) url = longstring.split('Key5Split')[1] summary = unicode(longstring.split('Key5Split')[2]) @@ -3841,14 +4247,17 @@ def Bookmarks(title, session = None, **kwargs): if url not in items_in_bm: items_in_bm.append(url) - + is9anime = 'False' ES = '' - if ES_API_URL.lower() in url.lower(): - ES = '*' + if common.ES_API_URL.lower() in url.lower(): + ES = common.EMOJI_EXT + if common.ANIME_URL.lower() in url.lower(): + ES = common.EMOJI_ANIME + is9anime = 'True' if fmovies.FILTER_PATH in url: oc.add(DirectoryObject( - key=Callback(Search, query=stitle.replace(' (All Seasons)',''), session = session, mode='other seasons', thumb=thumb, summary=summary), + key=Callback(Search, query=stitle.replace(' (All Seasons)',''), session = session, mode='other seasons', thumb=thumb, summary=summary, is9anime=is9anime), title='%s%s' % (stitle,ES), thumb=thumb, summary=summary @@ -4775,7 +5184,7 @@ def Check(title, url, **kwargs): fmovies_urlhost = common.client.geturlhost(url) #Log("%s --- %s --- %s" % (longstring, url, fmovies_urlhost)) - if longstring != None and (longstring.lower()).find(ES_API_URL.lower()) != -1: + if longstring != None and common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True: return True if longstring != None and url in longstring: @@ -4858,7 +5267,7 @@ def ClearBookmarks(**kwargs): for each in Dict: try: url = Dict[each] - if url.find(ES_API_URL.lower()) != -1 and 'http' in url and 'RR44SS' not in url: + if common.isArrayValueInString(common.EXT_SITE_URLS, url) == True and 'http' in url and 'RR44SS' not in url: remove_list.append(each) except: continue @@ -4883,7 +5292,7 @@ def ClearSearches(**kwargs): try: if (each.find('fmovies') != -1 or each.find('bmovies') != -1 or each.find(common.TITLE.lower()) != -1) and 'MyCustomSearch' in each: remove_list.append(each) - elif each.find(ES_API_URL.lower()) != -1 and 'MyCustomSearch' in each: + elif common.isArrayValueInString(common.EXT_SITE_URLS, each) == True and 'MyCustomSearch' in each: remove_list.append(each) except: continue @@ -4900,7 +5309,7 @@ def ClearSearches(**kwargs): #################################################################################################### @route(PREFIX + "/search") -def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, summary=None, session=None, **kwargs): +def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, summary=None, is9anime='False', session=None, **kwargs): if not common.interface.isInitialized(): return MC.message_container("Please wait..", "Please wait a few seconds for the Interface to Load & Initialize plugins") @@ -4908,6 +5317,9 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su last_page_no = page_count query2 = None + if page_count=='1' and mode == 'default': + Thread.Create(AnimeSearchExt,{},query,session) + if surl != None: if mode == 'people' or mode == 'tag': url = surl + '?page=%s' % (str(page_count)) @@ -4920,7 +5332,10 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su Dict.Save() url = fmovies.BASE_URL + fmovies.SEARCH_PATH + '?page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) elif mode == 'other seasons': - url = fmovies.BASE_URL + fmovies.FILTER_PATH + '?type=series&page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) + if is9anime == 
'False': + url = fmovies.BASE_URL + fmovies.FILTER_PATH + '?type=series&page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) + else: + url = common.ANIME_URL + fmovies.FILTER_PATH + '?type=series&page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) else: url = fmovies.BASE_URL + fmovies.SEARCH_PATH + '?page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) @@ -4932,7 +5347,10 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su elems = [] errorB = False try: - elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']") + if is9anime == 'False': + elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']") + else: + elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row']//div[@class='item']") last_page_no = int(page_count) last_page_no = int(page_data.xpath(".//*[@id='body-wrapper']//ul[@class='pagination'][1]//li[last()-1]//text()")[0]) except: @@ -4941,10 +5359,16 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su no_elems = len(elems) if errorB==True and no_elems == 0 and mode == 'other seasons': - xurl = fmovies.BASE_URL + fmovies.SEARCH_PATH + '?page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) + if is9anime == 'False': + xurl = fmovies.BASE_URL + fmovies.SEARCH_PATH + '?page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) + else: + xurl = common.ANIME_URL + fmovies.SEARCH_PATH + '?page=%s&keyword=%s' % (str(page_count), String.Quote(query, usePlus=True)) page_data, error = common.GetPageElements(url=xurl, timeout=7) try: - elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']") + if is9anime == 'False': + elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']") + else: + elems = page_data.xpath(".//*[@id='body-wrapper']//div[@class='row']//div[@class='item']") last_page_no = int(page_count) last_page_no = int(page_data.xpath(".//*[@id='body-wrapper']//ul[@class='pagination'][1]//li[last()-1]//text()")[0]) errorB = False @@ -4968,7 +5392,10 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su if no_elems > 0: for elem in elems: name = elem.xpath(".//a[@class='name']//text()")[0] - loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] + if is9anime == 'False': + loc = fmovies.BASE_URL + elem.xpath(".//a[@class='name']//@href")[0] + else: + loc = elem.xpath(".//a[@class='name']//@href")[0] thumb_t = elem.xpath(".//a[@class='poster']//@src")[0] thumb = thumb_t if 'url' not in thumb_t else thumb_t.split('url=')[1] summary = 'Plot Summary on Item Page.' 
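[Editor's sketch, not part of the patch] The Search() changes above branch on is9anime twice: once to pick the site root (fmovies.BASE_URL vs common.ANIME_URL) and once to pick the result-grid XPath, since 9anime drops the movie-list class from its row container. A minimal standalone model of that branching, under stated assumptions: SEARCH_PATH = '/search' is inferred from ANIME_SEARCH_URL in common.py, and build_search() is a hypothetical helper, not a function in the plugin.

# --- illustrative sketch (editor's note), not part of the patch ---
import urllib

BASE_URL = 'https://bmovies.to'    # default new_base_url from DefaultPrefs.json
ANIME_URL = 'https://9anime.is'    # common.ANIME_URL in this patch
SEARCH_PATH = '/search'            # assumed; matches ANIME_SEARCH_URL's path

def build_search(query, page_count='1', is9anime='False'):
    # Pick site root and result-item selector per the two branches in Search()
    site = BASE_URL if is9anime == 'False' else ANIME_URL
    url = site + SEARCH_PATH + '?page=%s&keyword=%s' % (
        page_count, urllib.quote_plus(query))
    xpath = (".//*[@id='body-wrapper']//div[@class='row movie-list']//div[@class='item']"
             if is9anime == 'False'
             else ".//*[@id='body-wrapper']//div[@class='row']//div[@class='item']")
    return url, xpath

print(build_search('naruto', is9anime='True'))

Keeping both selectors in one helper like this would also avoid the duplicated XPath strings in the 'other seasons' fallback a few lines up.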
@@ -5020,26 +5447,38 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su except Exception as e: Log('__init.py__ > Search Error: %s URL: %s' % (e, url)) pass - + + oc_ext = [] if Prefs['disable_extsources'] == False and common.interface.isInitialized() and page_count=='1' and mode == 'default': if common.SEARCH_EXT_SOURCES_FROM_SEARCH_MENU == True: try: oc_ext = SearchExt(query=query, query2=query2, append='true', session=session) - for o in oc_ext: - oc.add(o) except: pass else: - oc.add(DirectoryObject( + oc_ext.append(DirectoryObject( key = Callback(SearchExt, query=query, session=session), title = 'Search in External Sources', summary = 'Search for a possible match in External Sources', thumb = R(ICON_SEARCH) ) ) - + + if page_count=='1' and mode == 'default' and len(common.ANIME_SEARCH) > 0: + for o in common.ANIME_SEARCH: + try: + oc.add(o) + except: + pass + + if page_count=='1' and mode == 'default' and len(oc_ext) > 0: + for o in oc_ext: + try: + oc.add(o) + except: + pass + if len(oc) == 0: - try: error = page_data.xpath(".//*[@id='body-wrapper']//div[@class='alert alert-danger']//p[1]//text()")[0] error_msg = page_data.xpath(".//*[@id='body-wrapper']//div[@class='alert alert-danger']//p[3]//text()")[0] @@ -5061,7 +5500,7 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su if mode == 'default' or mode == 'people' or mode == 'tag' or (mode == 'other seasons' and no_elems == len(oc)): if int(page_count) < last_page_no: oc.add(NextPageObject( - key = Callback(Search, query = query, session = session, surl = surl, page_count = str(int(page_count) + 1), mode=mode), + key = Callback(Search, query = query, session = session, surl = surl, page_count = str(int(page_count) + 1), mode=mode, is9anime=is9anime), title = "Next Page (" + str(int(page_count) + 1) +'/'+ str(last_page_no) + ") >>", thumb = R(ICON_NEXT) ) @@ -5094,6 +5533,33 @@ def Search(query=None, surl=None, page_count='1', mode='default', thumb=None, su return oc +#################################################################################################### +@route(PREFIX + "/AnimeSearchExt") +def AnimeSearchExt(query=None, session=None, **kwargs): + + del common.ANIME_SEARCH[:] + + url = common.ANIME_SEARCH_URL % String.Quote(query, usePlus=True) + page_data, error = common.GetPageElements(url=url, timeout=7) + + if page_data != None: + items = page_data.xpath("//*[@id='body-wrapper']//div[@class='item']") + for i in items: + try: + thumb = i.xpath(".//@src")[0] + title = i.xpath(".//a[@class='name']//text()")[0] + url = i.xpath(".//@href")[0] + summary = 'Available on item page.' 
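[Editor's sketch, not part of the patch] The AnimeSearchExt() route added in this hunk is fired from Search() on a worker thread (Thread.Create(AnimeSearchExt, {}, query, session)) and parks its DirectoryObjects in the module-level common.ANIME_SEARCH list, which Search() drains further down. A bare-bones model of that hand-off, with plain threading standing in for Plex's Thread.Create; note that without a join or timeout the first request can race the worker and render before any anime results are appended, so they may only surface on a repeat search.

# --- illustrative sketch (editor's note), not part of the patch ---
import threading, time

ANIME_SEARCH = []  # stands in for common.ANIME_SEARCH

def anime_search_ext(query):
    # reset previous results, as AnimeSearchExt() does via del common.ANIME_SEARCH[:]
    del ANIME_SEARCH[:]
    time.sleep(0.2)  # stands in for the HTTP fetch + XPath parse
    ANIME_SEARCH.append('%s (anime result)' % query)

def search(query):
    # Search() kicks the worker, then builds the page from whatever has landed
    threading.Thread(target=anime_search_ext, args=(query,)).start()
    time.sleep(0.5)  # page build happens here in the plugin
    return list(ANIME_SEARCH)

print(search('bleach'))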
+ dobj = DirectoryObject( + key = Callback(EpisodeDetail, title=title, url=url, thumb=thumb, session=session, dataEXSAnim=url), + title = '%s %s' % (common.EMOJI_ANIME, title), + summary = summary, + thumb = Resource.ContentsOfURLWithFallback(url = thumb, fallback=ICON_UNAV) + ) + common.ANIME_SEARCH.append(dobj) + except Exception as e: + Log(e) + #################################################################################################### @route(PREFIX + "/SearchExt") def SearchExt(query=None, query2=None, session=None, xtitle=None, xyear=None, xtype=None, ximdbid=None, xsummary=None, xthumb=None, xitem=None, append='false', final='false', **kwargs): @@ -5261,6 +5727,8 @@ def SearchExt(query=None, query2=None, session=None, xtitle=None, xyear=None, xt summary = unicode(summary.replace('–','-')) + summary = unicode(common.ascii_only(summary)) + xthumb = GetThumb(thumb, session=session) xitem = E(JSON.StringFromObject(item)) @@ -5277,13 +5745,13 @@ def SearchExt(query=None, query2=None, session=None, xtitle=None, xyear=None, xt dobj = DirectoryObject( key = Callback(DoIMDBExtSources, title=title, year=year, type=type, imdbid=imdbid, summary=summary, thumb=xthumb, session=session), - title = '*'+watch_title, + title = common.EMOJI_EXT+watch_title, summary = summary, thumb = xthumb) else: dobj = DirectoryObject( key = Callback(SearchExt, query=query, query2=query2, session=session, xtitle=title, xyear=year, xtype=type, ximdbid=imdbid, xsummary=summary, xthumb=xthumb, xitem=xitem, append='false', final='false'), - title = '*'+watch_title, + title = common.EMOJI_EXT+watch_title, summary = summary, thumb = xthumb) @@ -5481,7 +5949,7 @@ def DoIMDBExtSources(title, year, type, imdbid, season=None, episode=None, episo oc.add(DirectoryObject( key = Callback(DoIMDBExtSources, title=x_title, year=x_year, type=type, imdbid=imdbid, item=E(JSON.StringFromObject(item)), season=season, episode=str(e), session=session), - title = '*'+watch_title, + title = common.EMOJI_EXT+watch_title, summary = summary, thumb = thumb)) @@ -5550,7 +6018,7 @@ def DoIMDBExtSources(title, year, type, imdbid, season=None, episode=None, episo oc.add(DirectoryObject( key = Callback(DoIMDBExtSources, title=title, year=year, type=type, imdbid=imdbid, item=E(JSON.StringFromObject(item)), season=season, episode=episode, session=session, final=True), - title = '*'+watch_title, + title = common.EMOJI_EXT+watch_title, summary = summary, thumb = thumb) ) @@ -5686,7 +6154,7 @@ def DoIMDBExtSourcesEpisode(query, title, year, type, imdbid, season, summary, t oc = ObjectContainer(title2='%s (%s)' % (title, watch_title), no_cache=common.isForceNoCache()) oc.add(DirectoryObject( key = Callback(DoIMDBExtSources, title=title, year=year, type=type, imdbid=imdbid, item=E(JSON.StringFromObject(item)), simpleSummary=True, season=season, episode=episode, session=session, final=True), - title = '*'+watch_title, + title = common.EMOJI_EXT+watch_title, summary = '%s : %s' % (watch_title,summary), thumb = Resource.ContentsOfURLWithFallback(url = thumb, fallback = ICON_UNAV))) @@ -6238,10 +6706,10 @@ def ShowCategoryES(title, filter=None, page_count='1', last_page_no=None, sessio # Build Filter-Search Url #http://movies-v2.api-fetch.website/movies/1?sort=trending&limit=50&year=2017&genre=Comedy&order=-1 - apiUrl = ES_API_URL + '/%s' % urllib2.quote(searchString, safe='%/_-+=&?') + apiUrl = common.ES_API_URL + '/%s' % urllib2.quote(searchString, safe='%/_-+=&?') if last_page_no == None: - pagesUrl = ES_API_URL + '/%s' % filter['type'] + pagesUrl = 
common.ES_API_URL + '/%s' % filter['type'] pages_data, error = common.GetPageAsString(url=pagesUrl) #Log(pages_data) #Log(error) @@ -6308,7 +6776,7 @@ def ShowCategoryES(title, filter=None, page_count='1', last_page_no=None, sessio type = filter['type'][0:-1] subtype = 'movie' - loc = ES_API_URL + '/%s/%s' % (type,id) + loc = common.ES_API_URL + '/%s/%s' % (type,id) data = {} data['title'] = name diff --git a/Contents/Code/common.py b/Contents/Code/common.py index d337fd3..82d9019 100644 --- a/Contents/Code/common.py +++ b/Contents/Code/common.py @@ -1,6 +1,6 @@ ################################################################################ TITLE = "FMoviesPlus" -VERSION = '0.49' # Release notation (x.y - where x is major and y is minor) +VERSION = '0.50' # Release notation (x.y - where x is major and y is minor) TAG = '' GITHUB_REPOSITORY = 'coder-alpha/FMoviesPlus.bundle' PREFIX = "/video/fmoviesplus" @@ -70,6 +70,8 @@ EMOJI_CASSETTE = u'\U0001F4FC' EMOJI_CINEMA = u'\U0001F3A6' EMOJI_TV = u'\U0001F4FA' +EMOJI_ANIME = u'\u2318' +EMOJI_EXT = u'*' # Simple Emoji's EMOJI_HEART = u'\u2665' @@ -96,7 +98,7 @@ INTERNAL_SOURCES_RIPTYPE_CONST = [{'label':'BRRIP','enabled': 'True'},{'label':'PREDVD','enabled': 'True'},{'label':'CAM','enabled': 'True'},{'label':'TS','enabled': 'True'},{'label':'SCR','enabled': 'True'},{'label':'UNKNOWN','enabled': 'True'}] INTERNAL_SOURCES_FILETYPE_CONST = [{'label':'Movie/Show','enabled': 'True'},{'label':'Trailer','enabled': 'True'},{'label':'Behind the scenes','enabled': 'False'},{'label':'Music Video','enabled': 'False'},{'label':'Deleted Scenes','enabled': 'False'},{'label':'Interviews','enabled': 'False'},{'label':'Misc.','enabled': 'False'}] -DEVICE_OPTIONS = ['Dumb-Keyboard','List-View','Redirector','Simple-Emoji','Vibrant-Emoji','Multi-Link-View','Full-poster display','Use-PhantomJS','No-Extra-Page-Info','Use-FileSize-Sorting'] +DEVICE_OPTIONS = ['Dumb-Keyboard','List-View','Redirector','Simple-Emoji','Vibrant-Emoji','Multi-Link-View','Full-poster display','Use-PhantomJS','No-Extra-Page-Info','Use-FileSize-Sorting','Force-Transcoding','No-Extra-Page-Info (Anime)'] DEVICE_OPTION = {DEVICE_OPTIONS[0]:'The awesome Keyboard for Search impaired devices', DEVICE_OPTIONS[1]:'Force List-View of Playback page listing sources', DEVICE_OPTIONS[2]:'Required in certain cases - *Experimental (refer forum)', @@ -106,7 +108,9 @@ DEVICE_OPTIONS[6]:'Shows Uncropped Poster - client compatibility is untested', DEVICE_OPTIONS[7]:'Use PhantomJS - For parsing links. 
Binary download required', DEVICE_OPTIONS[8]:'No-Extra-Page-Info - Speeds up navigation by not downloading detailed item info', - DEVICE_OPTIONS[9]:'Use-FileSize-Sorting - Uses FileSize instead of Resolution info provided by site which can be inaccurate'} + DEVICE_OPTIONS[9]:'Use-FileSize-Sorting - Uses FileSize instead of Resolution info provided by site which can be inaccurate', + DEVICE_OPTIONS[10]:'Force-Transcoding - Sets the item\'s container property to null in order to force transcoding by PMS', + DEVICE_OPTIONS[11]:'No-Extra-Page-Info (Anime) - Speeds up navigation by not downloading detailed item info'} DEVICE_OPTION_CONSTRAINTS = {DEVICE_OPTIONS[2]:[{'Pref':'use_https_alt','Desc':'Use Alternate SSL/TLS','ReqValue':'disabled'}]} DEVICE_OPTION_CONSTRAINTS2 = {DEVICE_OPTIONS[5]:[{'Option':6,'ReqValue':False}], DEVICE_OPTIONS[6]:[{'Option':5,'ReqValue':False}]} DEVICE_OPTION_PROPOGATE_TO_CONTROL = {DEVICE_OPTIONS[7]:True} @@ -124,6 +128,14 @@ DOWNLOAD_TEMP = {} DOWNLOAD_FMP_EXT = '.FMPTemp' +ANIME_SEARCH = [] +ANIME_KEY = '9anime' +ANIME_URL = 'https://%s.is' % ANIME_KEY +ANIME_SEARCH_URL = ANIME_URL + '/search?keyword=%s' +ES_API_URL = 'http://movies-v2.api-fetch.website' + +EXT_SITE_URLS = [ANIME_URL, ES_API_URL] + # Global Overrides - to disable SHOW_EXT_SRC_WHILE_LOADING = True USE_DOWNLOAD_RESUME_GEN = True @@ -296,6 +308,18 @@ def getHighestQualityLabel(strr, q_label): else: return q_label +####################################################################################################### +def isArrayValueInString(arr, mystr, toLowercase=True): + + for a in arr: + if toLowercase == True: + if a.lower() in mystr.lower(): + return True + else: + if a in mystr: + return True + + return False ####################################################################################################### @route(PREFIX + "/setDictVal") @@ -602,6 +626,7 @@ def GetPageElements(url, headers=None, referer=None, timeout=15): CACHE_EXPIRY = 60 * int(Prefs["cache_expiry_time"]) except: CACHE_EXPIRY = CACHE_EXPIRY_TIME + if CACHE_META[url]['ts'] + CACHE_EXPIRY > time.time(): page_data_string = D(CACHE_META[url]['data']) @@ -611,12 +636,13 @@ if page_data_string == None: raise PageError('Request returned None.') - page_data_elems = HTML.ElementFromString(page_data_string) + try: + page_data_elems = HTML.ElementFromString(page_data_string) + except Exception as e: + if url in CACHE_META.keys(): + del CACHE_META[url] + raise Exception(e) - CACHE_META[url] = {} - CACHE_META[url]['ts'] = time.time() - CACHE_META[url]['data'] = E(page_data_string) - except Exception as e: Log('ERROR common.py>GetPageElements: %s URL: %s DATA: %s' % (error,url,page_data_string)) @@ -746,6 +772,12 @@ def GetPageAsString(url, headers=None, timeout=15, referer=None): pass if page_data_string == None: error, page_data_string = interface.request(url = url, headers=headers, timeout=str(timeout), error=True) + + if url not in CACHE_META.keys() and page_data_string != None and error == '': + CACHE_META[url] = {} + CACHE_META[url]['ts'] = time.time() + CACHE_META[url]['data'] = E(page_data_string) + except Exception as e: Log('ERROR common.py>GetPageAsString: %s URL: %s' % (e.args,url)) pass diff --git a/Contents/Code/fmovies.py b/Contents/Code/fmovies.py index 440aebd..bc8b8fb 100644 --- a/Contents/Code/fmovies.py +++ b/Contents/Code/fmovies.py @@ -79,101 +79,112 @@ def GetApiUrl(url, key, serverts=0, use_debug=True, use_https_alt=False, use_web res = None 
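[Editor's sketch, not part of the patch] The GetApiUrl() hunk that follows mostly re-indents the existing body into one outer try/except, so that any failure (including the new 'Warning'/'Notice' page-response cleanup) is handed back through the returned error instead of raising, while keeping the same cache-then-fetch shape with a single retry on a token error. Reduced to its skeleton under stated assumptions: CACHE and fetch_sources() below are stand-ins for common.CACHE and get_sources(), not the plugin's actual API.

# --- illustrative sketch (editor's note), not part of the patch ---
CACHE = {}

def fetch_sources(key, token_error=False):
    # placeholder for the real get_sources() network call
    if token_error:
        return {'file': 'https://example.invalid/%s.mp4' % key}, None
    return None, 'token expired'

def get_api_url(key):
    # serve from the in-memory cache when possible
    if key in CACHE:
        return CACHE[key], None
    res = error = None
    try:
        res, error = fetch_sources(key)
        if res is None and error is not None and 'token' in error:
            CACHE.clear()  # drop possibly-stale state before the 2nd attempt
            res, error = fetch_sources(key, token_error=True)
        if res is not None:
            CACHE[key] = res  # only cache successful lookups
    except Exception as e:
        error = e  # surfaced to the caller, mirroring the hunk's outer except
    return res, error

print(get_api_url('abc123'))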
isTargetPlay = False else: - if use_debug: - Log("Retrieving Fresh Movie Link") - - ret, isTargetPlay, error, host, res_subtitle = get_sources(url=url, key=key, use_debug=use_debug, serverts=serverts, myts=myts, use_https_alt=use_https_alt, use_web_proxy=use_web_proxy) - if use_debug: - Log("get_sources url: %s, key: %s" % (url,key)) - Log("get_sources ret: %s" % ret) - token_error = False - if error != None and 'token' in error: - token_error = True - - # if the request ever fails - clear CACHE right away and make 2nd attempt - # if token error make 2nd attempt using modified code - if common.USE_SECOND_REQUEST == True and (ret == None or token_error == True): - common.CACHE.clear() + try: if use_debug: - Log("Using Second Request due to token error") - Log("CACHE cleared due to null response from API - maybe cookie issue for %s" % url) - time.sleep(1.0) - ret, isTargetPlay, error, host, res_subtitle = get_sources(url=url, key=key, use_debug=use_debug, serverts=serverts, myts=myts, use_https_alt=use_https_alt, use_web_proxy=use_web_proxy, token_error=token_error) + Log("Retrieving Fresh Movie Link") + + ret, isTargetPlay, error, host, res_subtitle = get_sources(url=url, key=key, use_debug=use_debug, serverts=serverts, myts=myts, use_https_alt=use_https_alt, use_web_proxy=use_web_proxy) if use_debug: - Log("API - attempt 2nd") Log("get_sources url: %s, key: %s" % (url,key)) Log("get_sources ret: %s" % ret) - - if ret == None: - if use_debug: - Log("null response from API (possible file deleted) - for %s" % url) - return res, isTargetPlay, error, host, res_subtitle - else: - if isTargetPlay: - res = ret - common.CACHE[key] = {} - common.CACHE[key]['res'] = res - common.CACHE[key]['res_subtitle'] = res_subtitle - common.CACHE[key]['host'] = host - common.CACHE[key]['serverts'] = serverts - common.CACHE[key]['myts'] = myts - common.CACHE[key]['isTargetPlay'] = str(isTargetPlay) + Log("get_sources error: %s" % error) + token_error = False + if error != None and 'token' in error: + token_error = True + + # if the request ever fails - clear CACHE right away and make 2nd attempt + # if token error make 2nd attempt using modified code + if common.USE_SECOND_REQUEST == True and (ret == None or token_error == True): + common.CACHE.clear() if use_debug: - Log("Added " + key + " to CACHE") - #surl = common.host_openload.resolve(url=res, embedpage=True) - surl = ret + Log("Using Second Request due to token error") + Log("CACHE cleared due to null response from API - maybe cookie issue for %s" % url) + time.sleep(1.0) + ret, isTargetPlay, error, host, res_subtitle = get_sources(url=url, key=key, use_debug=use_debug, serverts=serverts, myts=myts, use_https_alt=use_https_alt, use_web_proxy=use_web_proxy, token_error=token_error) if use_debug: - Log("Target-Play Stream URL %s from %s" % (surl,ret)) - res = surl - else: - # fix api url to https - ret = ret.replace('http://','https://') - data = None - headersS = {'X-Requested-With': 'XMLHttpRequest'} - headersS['Referer'] = '%s/%s' % (url, key) - headersS['Cookie'] = common.CACHE_COOKIE[0]['cookie'] - try: - time.sleep(1.0) - data = common.interface.request_via_proxy_as_backup(ret, limit='0', headers=headersS, httpsskip=use_https_alt, hideurl=not use_debug) - data = json.loads(data) - except Exception as e: - Log.Error('ERROR fmovies.py>GetApiUrl-1: ARGS:%s, URL:%s' % (e,ret)) - pass + Log("API - attempt 2nd") + Log("get_sources url: %s, key: %s" % (url,key)) + Log("get_sources ret: %s" % ret) - if data == None: - return None, isTargetPlay, error, host, 
res_subtitle - if data['error'] == None: - res = JSON.StringFromObject(data['data']) - - add_bool = True + if ret == None: + if use_debug: + Log("null response from API (possible file deleted) - for %s" % url) + return res, isTargetPlay, error, host, res_subtitle + else: + if isTargetPlay: + res = ret + common.CACHE[key] = {} + common.CACHE[key]['res'] = res + common.CACHE[key]['res_subtitle'] = res_subtitle + common.CACHE[key]['host'] = host + common.CACHE[key]['serverts'] = serverts + common.CACHE[key]['myts'] = myts + common.CACHE[key]['isTargetPlay'] = str(isTargetPlay) + if use_debug: + Log("Added " + key + " to CACHE") + #surl = common.host_openload.resolve(url=res, embedpage=True) + surl = ret + if use_debug: + Log("Target-Play Stream URL %s from %s" % (surl,ret)) + res = surl + else: + # fix api url to https + ret = ret.replace('http://','https://') + data = None + headersS = {'X-Requested-With': 'XMLHttpRequest'} + headersS['Referer'] = '%s/%s' % (url, key) + headersS['Cookie'] = common.CACHE_COOKIE[0]['cookie'] try: - for ign in CACHE_IGNORELIST: - for res_file in data['data']: - if ign in res_file['file']: - add_bool = False - break + time.sleep(1.0) + data = common.interface.request_via_proxy_as_backup(ret, limit='0', headers=headersS, httpsskip=use_https_alt, hideurl=not use_debug) + if 'Warning' in data or 'Notice' in data: + Log('**Fixing requested page error responses**') + data = re.findall(r'{.*}',data)[0] + subdata = re.findall(r'', data) + if len(subdata) > 0: + data = data.replace(subdata[0],'') + data = json.loads(data) except Exception as e: - Log.Error("ERROR: %s" % e) - - if add_bool == True: - common.CACHE[key] = {} - common.CACHE[key]['res'] = res - common.CACHE[key]['res_subtitle'] = res_subtitle - common.CACHE[key]['host'] = host - common.CACHE[key]['serverts'] = serverts - common.CACHE[key]['myts'] = myts - common.CACHE[key]['isTargetPlay'] = str(isTargetPlay) - if use_debug: - Log("Added " + key + " to CACHE") - Log("Added " + res + " to " + key) + Log.Error('ERROR fmovies.py>GetApiUrl-1: ARGS:%s, URL:%s' % (e,ret)) + res = None + raise Exception('Site returned unknown response') + + if data == None: + return None, isTargetPlay, error, host, res_subtitle + if data['error'] == None: + res = JSON.StringFromObject(data['data']) + + add_bool = True + try: + for ign in CACHE_IGNORELIST: + for res_file in data['data']: + if ign in res_file['file']: + add_bool = False + break + except Exception as e: + Log.Error("ERROR: %s" % e) + + if add_bool == True: + common.CACHE[key] = {} + common.CACHE[key]['res'] = res + common.CACHE[key]['res_subtitle'] = res_subtitle + common.CACHE[key]['host'] = host + common.CACHE[key]['serverts'] = serverts + common.CACHE[key]['myts'] = myts + common.CACHE[key]['isTargetPlay'] = str(isTargetPlay) + if use_debug: + Log("Added " + key + " to CACHE") + Log("Added " + res + " to " + key) + else: + if use_debug: + Log("*IgnoreList URL* Not Added " + key + " to CACHE") + Log("*IgnoreList URL* Not Added " + res + " to " + key) + elif data['error'] != None: + error = data['error'] else: - if use_debug: - Log("*IgnoreList URL* Not Added " + key + " to CACHE") - Log("*IgnoreList URL* Not Added " + res + " to " + key) - elif data['error'] != None: - error = data['error'] - else: - error = 'Unknown error' + error = 'Unknown error' + except Exception as e: + error = e return res, isTargetPlay, error, host, res_subtitle @@ -268,7 +279,7 @@ def setTokenCookie(serverts=None, use_debug=False, reset=False, dump=False, quie time.sleep(0.1) del 
common.TOKEN_CODE[:] - if r in common.client.HTTP_GOOD_RESP_CODES and '503 Service Unavailable' not in r1: + if r in common.client.HTTP_GOOD_RESP_CODES and '503 Service Unavailable' not in r1 and 'NotFoundHttpException' not in r1: token_enc = common.client.b64encode(r1) common.TOKEN_CODE.append(token_enc) @@ -572,7 +583,8 @@ def get_sources(url, key, use_debug=True, serverts=0, myts=0, use_https_alt=Fals referer = url serverts = str(serverts) T_BASE_URL = BASE_URL - #T_BASE_URL = "https://fmovies.unlockpro.top" + T_BASE_URL = 'https://%s' % common.client.geturlhost(url) + is9Anime = True if common.ANIME_KEY in url else False time.sleep(0.5) @@ -591,7 +603,7 @@ def get_sources(url, key, use_debug=True, serverts=0, myts=0, use_https_alt=Fals hash_url = urlparse.urljoin(T_BASE_URL, HASH_PATH_INFO) query = {'ts': serverts, 'id': key, 'update':'0', 'server':'36'} - tk = get_token(query, token_error) + tk = get_token(query, token_error, is9Anime) if tk == None: raise ValueError('video token algo') @@ -621,7 +633,7 @@ def get_sources(url, key, use_debug=True, serverts=0, myts=0, use_https_alt=Fals error = result['error'] elif result['target'] != "": grabber = result['target'] - b, resp = decode_t(grabber, -18) + b, resp = decode_t(grabber, -18, is9Anime) if b == False: raise ValueError(resp) grabber = resp @@ -639,17 +651,17 @@ def get_sources(url, key, use_debug=True, serverts=0, myts=0, use_https_alt=Fals grab_server = str(urlparse.parse_qs(grab_data)['server'][0]) - b, resp = decode_t(result['params']['token'], -18) + b, resp = decode_t(result['params']['token'], -18, is9Anime) if b == False: raise ValueError(resp) token = resp - b, resp = decode_t(result['params']['options'], -18) + b, resp = decode_t(result['params']['options'], -18, is9Anime) if b == False: raise ValueError(resp) options = resp grab_query = {'ts':serverts, grabber_url:'','id':result['params']['id'],'server':grab_server,'mobile':'0','token':token,'options':options} - tk = get_token(grab_query, token_error) + tk = get_token(grab_query, token_error, is9Anime) if tk == None: raise ValueError('video token algo') @@ -696,11 +708,14 @@ def r01(t, e, token_error=False): h = format(int(hex(n),16),'x') return h -def a01(t, token_error=False): +def a01(t, token_error=False, is9Anime=False): i = 0 for e in range(0, len(t)): if token_error == False: - i += ord(t[e]) + e + if is9Anime == False: + i += ord(t[e]) + e + else: + i += ord(t[e]) * e else: try: i += eval('ord(t[%s]) %s' % (e, TOKEN_OPER[0])) @@ -709,12 +724,14 @@ def a01(t, token_error=False): return i #6856 -def decode_t(t, i): +def decode_t(t, i, is9Anime=False, **kwargs): n = [] e = [] r = '' try: + if is9Anime == True: + return True, t for n in range(0, len(t)): if n==0 and t[n] == '.': pass @@ -734,12 +751,16 @@ def decode_t(t, i): Log("fmovies.py > decode_t > %s" % e) return False, 'Error in decoding val' -def get_token(n, token_error=False, **kwargs): +def get_token(n, token_error=False, is9Anime=False, **kwargs): try: - d = TOKEN_KEY[0] - s = a01(d, token_error) + if is9Anime == False: + d = TOKEN_KEY[0] + else: + d = common.control.setting('9animeVidToken') + + s = a01(d, token_error, is9Anime) for i in n: - s += a01(r01(d + i, n[i]), token_error) + s += a01(r01(d + i, n[i]), token_error, is9Anime) return {'_': str(s)} except Exception as e: Log("fmovies.py > get_token > %s" % e) diff --git a/Contents/Code/tools.py b/Contents/Code/tools.py index 703249e..ce3535d 100644 --- a/Contents/Code/tools.py +++ b/Contents/Code/tools.py @@ -37,8 +37,6 @@ MC = 
common.NewMessageContainer(PREFIX, TITLE) -ES_API_URL = 'http://movies-v2.api-fetch.website' - BACKUP_KEYS = ['DOWNLOAD_OPTIONS','INTERNAL_SOURCES_QUALS', 'INTERNAL_SOURCES_SIZES', 'INTERNAL_SOURCES_RIPTYPE', 'INTERNAL_SOURCES_FILETYPE', 'OPTIONS_PROVIDERS', 'OPTIONS_PROXY', 'INTERNAL_SOURCES'] #################################################################################################### @@ -147,7 +145,7 @@ def SaveBookmarks(**kwargs): for each in Dict: longstring = str(Dict[each]) - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring) and 'Key5Split' in longstring: + if (('fmovies.' in longstring or 'bmovies.' in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'Key5Split' in longstring: stitle = unicode(longstring.split('Key5Split')[0]) url = longstring.split('Key5Split')[1] summary = unicode(longstring.split('Key5Split')[2]) @@ -225,7 +223,7 @@ def SaveConfig(**kwargs): for each in Dict: longstring = str(Dict[each]) - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring) and 'Key5Split' in longstring: + if (('fmovies.' in longstring or 'bmovies.' in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'Key5Split' in longstring: stitle = unicode(longstring.split('Key5Split')[0]) url = longstring.split('Key5Split')[1] summary = unicode(longstring.split('Key5Split')[2]) @@ -253,7 +251,7 @@ def SaveConfig(**kwargs): for each in Dict: longstring = str(Dict[each]) - if (('fmovies.' in longstring or 'bmovies.' in longstring) or ES_API_URL.lower() in longstring.lower()) and 'RR44SS' in longstring: + if (('fmovies.' in longstring or 'bmovies.' in longstring) or common.isArrayValueInString(common.EXT_SITE_URLS, longstring) == True) and 'RR44SS' in longstring: longstringsplit = longstring.split('RR44SS') urls_list.append({'key': each, 'time': longstringsplit[4], 'val': longstring}) @@ -278,8 +276,10 @@ def SaveConfig(**kwargs): url = url.replace('www.','') ES = '' - if ES_API_URL.lower() in longstring.lower(): - ES = '*' + if common.ES_API_URL.lower() in longstring.lower(): + ES = common.EMOJI_EXT + if common.ANIME_URL.lower() in longstring.lower(): + ES = common.EMOJI_ANIME if url.replace('fmovies.to',fmovies_base) in items_in_recent: items_to_del.append(each['key']) diff --git a/Contents/DefaultPrefs.json b/Contents/DefaultPrefs.json index 3dbd50b..b092fe3 100644 --- a/Contents/DefaultPrefs.json +++ b/Contents/DefaultPrefs.json @@ -1 +1 @@ -[ { "id": "new_base_url", "label": "Base site url", "type": "enum", "values": ["https://bmovies.to","https://fmovies.se","https://fmovies.is","https://fmovies.to"], "default": "https://bmovies.to" }, { "id": "webhook_url", "label": "Webhook url (https://hook.io/coder-alpha/test/fork <- fork your own)", "type": "text", "default": "https://hook.io/coder-alpha/test/" }, { "id": "reqkey_cookie", "label": "Request Cookie (reqkey cookie val.) - manual method/keep blank otherwise", "type": "text", "default": "" }, { "id": "cache_expiry_time", "label": "Cache Expiry Time (in mins.)", "type": "text", "default": "100" }, { "id": "dont_fetch_more_info", "label": "No Extra Info. for Nav. 
Pages (Speeds Up Navigation)", "type": "bool", "default": "false" }, { "id": "use_https_alt", "label": "Use Alternate SSL/TLS", "type": "bool", "default": "false" }, { "id": "use_web_proxy", "label": "Use SSL Web-Proxy", "type": "bool", "default": "false" }, { "id": "disable_extsources", "label": "Disable External Sources", "type": "bool", "default": "false" }, { "id": "disable_downloader", "label": "Disable Downloading Sources (Please read Plex Privacy policy)", "type": "bool", "default": "true" }, { "id": "download_connections", "label": "Number of concurrent Download Threads", "type": "enum", "values": ["1","2","3","4","5","6","7","8","9","10"], "default": "2" }, { "id": "download_speed_limit", "label": "Limit Aggregate Download Speed (KB/s)", "type": "enum", "values": ["0","128","256","512","1024","2048","5120","10240","20480","30720","40960","51200"], "default": "0" }, { "id": "use_linkchecker", "label": "Use LinkChecker for Videos", "type": "bool", "default": "false" }, { "id": "ca_api_key", "label": "External Sources Search API Key", "type": "text", "option": "hidden", "secure": "true", "default": "UTBFeU1ERTM=" }, { "id": "control_all_uc_api_key", "label": "All UC API Key (Signup: http://accounts.alluc.com/)", "type": "text", "option": "hidden", "secure": "true", "default": "" }, { "id": "use_openload_pairing", "label": "Use OpenLoad (PhantomJS is primary, API Login:Key is secondary followed by Pairing. Disabled uses USS as primary)", "type": "bool", "default": "true" }, { "id": "control_openload_api_key", "label": "Openload API Key enter as Login:Key (Signup: https://openload.co/register)", "type": "text", "option": "hidden", "secure": "true", "default": "" }, { "id": "use_phantomjs", "label": "Use PhantomJS (Binary download required)", "type": "bool", "default": "false" }, { "id": "plextv", "label": "Auth Admin through Plex.tv (else use localhost)", "type": "bool", "default": "false" }, { "id": "use_debug", "label": "Enable Debug Mode (might show IP and computer Username in Logs)", "type": "bool", "default": "false" } ] \ No newline at end of file +[ { "id": "new_base_url", "label": "Base site url", "type": "enum", "values": ["https://bmovies.to","https://bmovies.pro","https://bmovies.is","https://fmovies.se","https://fmovies.is","https://fmovies.to"], "default": "https://bmovies.to" }, { "id": "webhook_url", "label": "Webhook url (https://hook.io/coder-alpha/test/fork <- fork your own)", "type": "text", "default": "https://hook.io/coder-alpha/test/" }, { "id": "reqkey_cookie", "label": "Request Cookie (reqkey cookie val.) - manual method/keep blank otherwise", "type": "text", "default": "" }, { "id": "cache_expiry_time", "label": "Cache Expiry Time (in mins.)", "type": "text", "default": "100" }, { "id": "dont_fetch_more_info", "label": "No Extra Info. for Nav. 
Pages (Speeds Up Navigation)", "type": "bool", "default": "false" }, { "id": "use_https_alt", "label": "Use Alternate SSL/TLS", "type": "bool", "default": "false" }, { "id": "use_web_proxy", "label": "Use SSL Web-Proxy", "type": "bool", "default": "false" }, { "id": "disable_extsources", "label": "Disable External Sources", "type": "bool", "default": "false" }, { "id": "disable_downloader", "label": "Disable Downloading Sources (Please read Plex Privacy policy)", "type": "bool", "default": "true" }, { "id": "download_connections", "label": "Number of concurrent Download Threads", "type": "enum", "values": ["1","2","3","4","5","6","7","8","9","10"], "default": "2" }, { "id": "download_speed_limit", "label": "Limit Aggregate Download Speed (KB/s)", "type": "enum", "values": ["0","128","256","512","1024","2048","5120","10240","20480","30720","40960","51200"], "default": "0" }, { "id": "use_linkchecker", "label": "Use LinkChecker for Videos", "type": "bool", "default": "false" }, { "id": "ca_api_key", "label": "External Sources Search API Key", "type": "text", "option": "hidden", "secure": "true", "default": "UTBFeU1ERTM=" }, { "id": "control_all_uc_api_key", "label": "All UC API Key (Signup: http://accounts.alluc.com/)", "type": "text", "option": "hidden", "secure": "true", "default": "" }, { "id": "use_openload_pairing", "label": "Use OpenLoad (PhantomJS is primary, API Login:Key is secondary followed by Pairing. Disabled uses USS as primary)", "type": "bool", "default": "true" }, { "id": "control_openload_api_key", "label": "Openload API Key enter as Login:Key (Signup: https://openload.co/register)", "type": "text", "option": "hidden", "secure": "true", "default": "" }, { "id": "use_phantomjs", "label": "Use PhantomJS (Binary download required)", "type": "bool", "default": "false" }, { "id": "plextv", "label": "Auth Admin through Plex.tv (else use localhost)", "type": "bool", "default": "false" }, { "id": "use_debug", "label": "Enable Debug Mode (might show IP and computer Username in Logs)", "type": "bool", "default": "false" } ] \ No newline at end of file diff --git a/Contents/Libraries/Shared/resources/lib/libraries/client.py b/Contents/Libraries/Shared/resources/lib/libraries/client.py index 9fae9ff..5bc4180 100644 --- a/Contents/Libraries/Shared/resources/lib/libraries/client.py +++ b/Contents/Libraries/Shared/resources/lib/libraries/client.py @@ -219,8 +219,8 @@ def redirect_request(self, req, fp, code, msg, headers, newurl): if response.code == 503: #Log("AAAA- CODE %s|%s " % (url, response.code)) if 'cf-browser-verification' in content: - print("CF-OK") - + control.log('cf-browser-verification: CF-OK') + netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc) #cf = cache.get(cfcookie, 168, netloc, headers['User-Agent'], timeout) cfc = cfcookie() @@ -350,6 +350,41 @@ def redirect_request(self, req, fp, code, msg, headers, newurl): setIP6() return +def simpleCheck(link, headers={}, cookie={}, retError=False, retry429=False, cl=3): + try: + code = '0' + size = '0' + red_url = None + r = requests.get(link, headers=headers, cookies=cookie, stream=True, verify=False, allow_redirects=True) + + if retry429 == True: + c = 0 + while r.status_code == 429 and c < cl: + time.sleep(5) + r = requests.get(link, stream=True, verify=False, allow_redirects=True) + c += 1 + + if str(r.status_code) not in HTTP_GOOD_RESP_CODES and str(r.status_code) not in GOOGLE_HTTP_GOOD_RESP_CODES_1: + raise Exception('HTTP Response: %s' % str(r.status_code)) + size = 
r.headers['Content-length'] + red_url = r.url + code = str(r.status_code) + r.close() + + #site = urllib.urlopen(link) + #meta = site.info() + #size = meta.getheaders("Content-Length")[0] + + if retError == True: + return code, red_url, size, '' + else: + return code, red_url, size + except Exception as e: + if retError == True: + return code, red_url, size, '{}'.format(e) + else: + return code, red_url, size + def getFileSize(link, retError=False, retry429=False, cl=3): try: r = requests.get(link, stream=True, verify=False, allow_redirects=True) diff --git a/Contents/Libraries/Shared/resources/lib/libraries/control.py b/Contents/Libraries/Shared/resources/lib/libraries/control.py index ea50e8e..a0add37 100644 --- a/Contents/Libraries/Shared/resources/lib/libraries/control.py +++ b/Contents/Libraries/Shared/resources/lib/libraries/control.py @@ -26,6 +26,8 @@ fanarttv_key = base64.urlsafe_b64decode('YTc4YzhmZWRjN2U3NTE1MjRkMzkyNmNhMmQyOTU3OTg=') trakt_key = base64.urlsafe_b64decode('NDFjYzI1NjY5Y2Y2OTc0NTg4ZjA0MTMxYjcyZjc4MjEwMzdjY2I1ZTdlMjMzNDVjN2MxZTk3NGI4MGI5ZjI1NQ==') trakt_secret = base64.urlsafe_b64decode('Y2I4OWExYTViN2ZlYmJiMDM2NmQ3Y2EyNzJjZDc4YTU5MWQ1ODI2Y2UyMTQ1NWVmYzE1ZDliYzQ1ZWNjY2QyZQ==') +all_uc_api = 'WXpFeE1qZzROV0k0WTJWall6Rm1aR1ZtWlRNNU1tVXdaR1E1WlRneVlqRT0=' +openload_api = 'WW1ReU9USmxNalkzTjJZd016RTFOenBmWjNnMU5GTkROUT09' loggertxt = [] setting_dict = {} @@ -39,9 +41,9 @@ def setting(key): def set_setting(key, value): if key == base64.b64decode('Y29udHJvbF9hbGxfdWNfYXBpX2tleQ==') and (value == None or value == '' or len(value) == 0): - value = base64.b64decode(base64.b64decode('WXpFeE1qZzROV0k0WTJWall6Rm1aR1ZtWlRNNU1tVXdaR1E1WlRneVlqRT0=')) + value = base64.b64decode(base64.b64decode(all_uc_api)) elif key == base64.b64decode('Y29udHJvbF9vcGVubG9hZF9hcGlfa2V5') and (value == None or value == '' or len(value) == 0 or ':' not in value): - value = base64.b64decode(base64.b64decode('WW1ReU9USmxNalkzTjJZd016RTFOenBmWjNnMU5GTkROUT09')) + value = base64.b64decode(base64.b64decode(openload_api)) setting_dict[key] = value diff --git a/Contents/Libraries/Shared/resources/lib/proxies/__init__.py b/Contents/Libraries/Shared/resources/lib/proxies/__init__.py index 3090626..8e4b0ee 100644 --- a/Contents/Libraries/Shared/resources/lib/proxies/__init__.py +++ b/Contents/Libraries/Shared/resources/lib/proxies/__init__.py @@ -18,8 +18,8 @@ def init(): try: c = __import__(name, globals(), locals(), [], -1).proxy() log("Adding Proxy %s : %s to Interface" % (c.name, c.base_link)) - sourceProxies.append({'name': c.name, 'url': c.base_link, 'captcha':c.captcha, 'SSL':c.ssl, 'working':c.working, 'speed':round(c.speedtest,3)}) - sourceProxiesCaller.append({'name': c.name, 'url': c.base_link, 'captcha':c.captcha, 'working':c.working, 'speed':round(c.speedtest,3), 'call': c}) + sourceProxies.append({'name': c.name, 'url': c.base_link, 'captcha':c.captcha, 'SSL':c.ssl, 'working':c.working, 'speed':round(c.speedtest,3), 'ver':c.ver, 'date':c.update_date}) + sourceProxiesCaller.append({'name': c.name, 'url': c.base_link, 'captcha':c.captcha, 'working':c.working, 'speed':round(c.speedtest,3), 'ver':c.ver, 'date':c.update_date, 'call': c}) except Exception as e: log(type='CRITICAL', err='Could not import %s > %s' % (name,e)) diff --git a/Contents/Libraries/Shared/resources/lib/proxies/xperienc.py b/Contents/Libraries/Shared/resources/lib/proxies/xperienc.py index c9f0a32..b0ea190 100644 --- a/Contents/Libraries/Shared/resources/lib/proxies/xperienc.py +++ 
b/Contents/Libraries/Shared/resources/lib/proxies/xperienc.py @@ -14,7 +14,9 @@ class proxy: def __init__(self): del loggertxt[:] - log(type='INFO', method='init', err=' -- Initializing %s Start --' % name) + self.ver = '0.0.1' + self.update_date = 'Nov. 13, 2017' + log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date)) self.base_link = 'https://www.xperienc.com' self.name = name self.loggertxt = [] @@ -24,7 +26,7 @@ def __init__(self): self.speedtest = 0 self.headers = {'Connection' : 'keep-alive', 'User-Agent' : client.randomagent()} self.working = self.testSite() - log(type='INFO', method='init', err=' -- Initializing %s End --' % name) + log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date)) def getLog(self): self.loggertxt = loggertxt diff --git a/Contents/Libraries/Shared/resources/lib/resolvers/__init__.py b/Contents/Libraries/Shared/resources/lib/resolvers/__init__.py index 6cfb574..aee0088 100644 --- a/Contents/Libraries/Shared/resources/lib/resolvers/__init__.py +++ b/Contents/Libraries/Shared/resources/lib/resolvers/__init__.py @@ -52,6 +52,8 @@ def init(): log(type='CRITICAL', err='Could not import %s > %s (Retry-Failed)' % (name,e)) error_info = { 'name': name, + 'ver': '0.0.0', + 'date': 'Jan. 01, 2000', 'class': name, 'speed': 0, 'netloc': name, diff --git a/Contents/Libraries/Shared/resources/lib/resolvers/host_gvideo.py b/Contents/Libraries/Shared/resources/lib/resolvers/host_gvideo.py index 3bb5839..836afb2 100644 --- a/Contents/Libraries/Shared/resources/lib/resolvers/host_gvideo.py +++ b/Contents/Libraries/Shared/resources/lib/resolvers/host_gvideo.py @@ -56,9 +56,9 @@ '59': 'mp4' } -CONTAINER_KEYS = ['flv','mp4','3gp','webm','mkv','ftypisom','matroska'] +CONTAINER_KEYS = ['flv','mp4','3gp','webm','mkv','ftypisom','matroska','ftypmp42', 'isommp42', 'lmvhd'] -FMOVIES_SERVER_MAP = {'Server F4':' Google-F4 (blogspot.com)', 'Server G1':'Google-G1 (googleapis.com)', 'Server G2':'Google-G2 (googleapis.com)', 'Server G3':'Google-G3 (googleapis.com)', 'Server G4':'Google-G4 (googleapis.com)'} +FMOVIES_SERVER_MAP = {'Server F1':' Google-F1 (blogspot.com)','Server F2':' Google-F2 (blogspot.com)','Server F3':' Google-F3 (blogspot.com)','Server F4':' Google-F4 (blogspot.com)', 'Server G1':'Google-G1 (googleapis.com)', 'Server G2':'Google-G2 (googleusercontent.com)', 'Server G3':'Google-G3 (googleusercontent.com)', 'Server G4':'Google-G4 (googleapis.com)'} name = 'gvideo' loggertxt = [] @@ -66,7 +66,9 @@ class host: def __init__(self): del loggertxt[:] - log(type='INFO', method='init', err=' -- Initializing %s Start --' % name) + self.ver = '0.0.1' + self.update_date = 'Nov. 
13, 2017' + log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date)) self.init = False self.logo = 'http://i.imgur.com/KYtgDP6.png' self.name = 'gvideo' @@ -88,11 +90,13 @@ def __init__(self): #self.checkGetLinkAPI() self.UA = client.USER_AGENT self.init = True - log(type='INFO', method='init', err=' -- Initializing %s End --' % name) + log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date)) def info(self): return { 'name': self.name, + 'ver': self.ver, + 'date': self.update_date, 'class': self.name, 'speed': round(self.speedtest,3), 'netloc': self.netloc, @@ -346,7 +350,7 @@ def getVideoMetaData(url, httpsskip=False): print 'ERROR: %s' % e return res -def check(url, videoData=None, headers=None, cookie=None, doPrint=False, httpsskip=False): +def check(url, videoData=None, headers=None, cookie=None, doPrint=True, httpsskip=False): try: if 'google.com/file' in url: if videoData==None: @@ -363,27 +367,40 @@ def check(url, videoData=None, headers=None, cookie=None, doPrint=False, httpssk log('FAIL', 'check', 'status == fail') return (False, videoData) else: - http_res, red_url = client.request(url=url, output='responsecodeext', followredirect=True, headers=headers, cookie=cookie, IPv4=True, httpsskip=httpsskip) - key_found = False - if http_res in client.HTTP_GOOD_RESP_CODES or http_res in client.GOOGLE_HTTP_GOOD_RESP_CODES_1: - chunk = client.request(url=red_url, output='chunk', headers=headers, cookie=cookie, IPv4=True, httpsskip=httpsskip) # dont use web-proxy when retrieving chunk - if doPrint: - print "url --- %s" % red_url - print "chunk --- %s" % chunk[0:20] - - for key in CONTAINER_KEYS: - if key.lower() in str(chunk[0:20]).lower(): - key_found = True - break - else: - log('FAIL', 'check', 'HTTP Resp:%s for url: %s' % (http_res, url)) - return (False, videoData) - if key_found == False: - log('FAIL', 'check', 'keyword in chunk not found : %s --- Chunk: %s' % (url,chunk[0:20])) - return (False, videoData) + try: + key_found = False + http_res, red_url = client.request(url=url, output='responsecodeext', followredirect=True, headers=headers, cookie=cookie, IPv4=True, httpsskip=httpsskip) + if http_res in client.HTTP_GOOD_RESP_CODES or http_res in client.GOOGLE_HTTP_GOOD_RESP_CODES_1: + chunk = client.request(url=red_url, output='chunk', headers=headers, cookie=cookie, IPv4=True, httpsskip=httpsskip) # dont use web-proxy when retrieving chunk + if doPrint: + print "url --- %s" % red_url + print "chunk --- %s" % chunk[0:50] + + for key in CONTAINER_KEYS: + try: + if key.lower() in str(chunk[0:20]).lower(): + key_found = True + break + if key.lower() in str(chunk[0:50]).lower(): + key_found = True + break + except: + pass + else: + log('FAIL', 'check', 'HTTP Resp:%s for url: %s' % (http_res, url)) + return (False, videoData) + if key_found == False: + log('FAIL', 'check', 'keyword in chunk not found : %s --- Chunk: %s' % (url,chunk[0:20])) + return (False, videoData) + except: + http_res, red_url, sz = client.simpleCheck(url, headers=headers, cookie=cookie) + if http_res not in client.HTTP_GOOD_RESP_CODES and http_res not in client.GOOGLE_HTTP_GOOD_RESP_CODES_1: + log('FAIL', 'check', 'HTTP Resp:%s for url: %s' % (http_res, url)) + return (False, videoData) return (True, videoData) - except: + except Exception as e: + log('ERROR', 'check', '%s' % e, dolog=doPrint) return (False, videoData) def getFileLink(id, httpsskip=False): diff --git 
diff --git a/Contents/Libraries/Shared/resources/lib/resolvers/host_mega.py b/Contents/Libraries/Shared/resources/lib/resolvers/host_mega.py
index 7312deb..800c50c 100644
--- a/Contents/Libraries/Shared/resources/lib/resolvers/host_mega.py
+++ b/Contents/Libraries/Shared/resources/lib/resolvers/host_mega.py
@@ -100,12 +100,14 @@
            'Connection': 'keep-alive'}

 name = 'mega'
+ver = '0.0.1'
+update_date = 'Nov. 13, 2017'
 loggertxt = []

 class host:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name,ver,update_date))
         self.init = False
         self.msg = ''
         if crypto_msg != None:
@@ -132,7 +134,7 @@ def __init__(self):
         self.resolver = False
         self.working = False
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name,ver,update_date))

     def info(self):
         return {
diff --git a/Contents/Libraries/Shared/resources/lib/resolvers/host_openload.py b/Contents/Libraries/Shared/resources/lib/resolvers/host_openload.py
index 2d2f535..4bf4430 100644
--- a/Contents/Libraries/Shared/resources/lib/resolvers/host_openload.py
+++ b/Contents/Libraries/Shared/resources/lib/resolvers/host_openload.py
@@ -89,7 +89,9 @@ class DecodeError(Exception):
 class host:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.logo = 'http://i.imgur.com/OM7VzQs.png'
         self.name = 'openload'
@@ -112,11 +114,13 @@ def __init__(self):
         self.working = True
         self.resolver = self.testResolver()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
             'name': self.name,
+            'ver': self.ver,
+            'date': self.update_date,
             'class': self.name,
             'speed': round(self.speedtest,3),
             'netloc': self.netloc,
diff --git a/Contents/Libraries/Shared/resources/lib/resolvers/host_youtube.py b/Contents/Libraries/Shared/resources/lib/resolvers/host_youtube.py
index 390c031..c3c4f8a 100644
--- a/Contents/Libraries/Shared/resources/lib/resolvers/host_youtube.py
+++ b/Contents/Libraries/Shared/resources/lib/resolvers/host_youtube.py
@@ -48,7 +48,9 @@ class host:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.logo = 'http://i.imgur.com/qZUP77r.png'
         self.name = name
@@ -67,11 +69,13 @@ def __init__(self):
         self.resolver = self.testResolver()
         self.msg = ''
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
             'name': self.name,
+            'ver': self.ver,
+            'date': self.update_date,
             'class': self.name,
             'speed': round(self.speedtest,3),
             'netloc': self.netloc,
diff --git a/Contents/Libraries/Shared/resources/lib/sources/alluc_mv_tv.py b/Contents/Libraries/Shared/resources/lib/sources/alluc_mv_tv.py
index fef8ed6..45ebd2f 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/alluc_mv_tv.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/alluc_mv_tv.py
@@ -36,8 +36,11 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 14, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
+        self.count = 2
         self.domains = ['alluc.ee','alluc.com']
         self.base_link = 'https://www.alluc.ee'
         self.moviesearch_link = ''
@@ -59,7 +62,13 @@ def __init__(self):
         self.siteonline = self.testSite()
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        if control.setting('control_all_uc_api_key') == control.base64.b64decode(control.base64.b64decode(control.all_uc_api)):
+            log(type='INFO', method='init', err='Using Plugin (Non-User) Set API Key - Count is set at 2')
+            self.count = 2
+        else:
+            log(type='INFO', method='init', err='Using User Set API Key - Count is set at 10')
+            self.count = 10
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         msg = self.error
@@ -150,11 +159,11 @@ def get_movie(self, imdb, title, year, proxy_options=None, key=None):

         if control.setting('control_all_uc_api_key'):
             if control.setting('realdebrid_token') or control.setting('premiumize_user'):
-                self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s+%s'
+                self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s+%s&count=%s'
             else:
-                self.moviesearch_link = '/api/search/stream/?apikey=%s&query=%s+%s'
+                self.moviesearch_link = '/api/search/stream/?apikey=%s&query=%s+%s&count=%s'

-            url = self.moviesearch_link % (control.setting('control_all_uc_api_key'),cleantitle.geturl(title), year)
+            url = self.moviesearch_link % (control.setting('control_all_uc_api_key'),cleantitle.geturl(title), year, str(self.count))

             r = urlparse.urljoin(self.base_link, url)
             xr = r + "+%23newlinks"
@@ -227,8 +236,8 @@ def get_movie(self, imdb, title, year, proxy_options=None, key=None):
                     stream_url.append({'url': tmp, 'hoster': item['hostername'], 'title': xtitle, 'lang':lang, 'src':src, 'ext':ext})

             if USE_MEGA_SPECIFIC_SEARCH == True and self.init == True and control.setting('Host-mega') != False:
-                self.moviesearch_link = '/api/search/download?apikey=%s&query=%s+%s'
-                url = self.moviesearch_link % (control.setting('control_all_uc_api_key'),cleantitle.geturl(title), year)
+                self.moviesearch_link = '/api/search/download?apikey=%s&query=%s+%s&count=%s'
+                url = self.moviesearch_link % (control.setting('control_all_uc_api_key'),cleantitle.geturl(title), year, str(self.count))
                 r = urlparse.urljoin(self.base_link, url)
                 r = r + "+host%3Amega.nz"
                 r = r + "+%23newlinks"
@@ -279,16 +288,16 @@ def get_episode(self, url=None, imdb=None, tvdb=None, title=None, year=None, sea
             stream_url = []
             if control.setting('control_all_uc_api_key'):
                 if control.setting('realdebrid_token') or control.setting('premiumize_user'):
-                    self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s'
+                    self.moviesearch_link = '/api/search/download?user=%s&password=%s&query=%s&count=%s'
                 else:
-                    self.moviesearch_link = '/api/search/stream/?apikey=%s&query=%s'
+                    self.moviesearch_link = '/api/search/stream/?apikey=%s&query=%s&count=%s'

             tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
             season = str(season)
             episode = str(episode)
             season, episode = season.zfill(2), episode.zfill(2)
             query = '%s s%se%s' % (tvshowtitle, season, episode)
-            query = self.moviesearch_link % (control.setting('control_all_uc_api_key'), urllib.quote_plus(query))
+            query = self.moviesearch_link % (control.setting('control_all_uc_api_key'), urllib.quote_plus(query), str(self.count))
             r = urlparse.urljoin(self.base_link, query)
             xr = r + "+%23newlinks"
             #r = requests.get(r).json()
@@ -362,9 +371,9 @@ def get_episode(self, url=None, imdb=None, tvdb=None, title=None, year=None, sea
                     stream_url.append({'url': tmp, 'hoster': item['hostername'], 'title': xtitle, 'lang':lang, 'src':src, 'ext':ext})

             if USE_MEGA_SPECIFIC_SEARCH == True and self.init == True and control.setting('Host-mega') != False:
-                self.moviesearch_link = '/api/search/download?apikey=%s&query=%s'
+                self.moviesearch_link = '/api/search/download?apikey=%s&query=%s&count=%s'
                 query = '%s s%se%s' % (tvshowtitle, season, episode)
-                query = self.moviesearch_link % (control.setting('control_all_uc_api_key'), urllib.quote_plus(query))
+                query = self.moviesearch_link % (control.setting('control_all_uc_api_key'), urllib.quote_plus(query), str(self.count))
                 r = urlparse.urljoin(self.base_link, query)
                 xr = r + "+%23newlinks"
                 r = xr + "+host%3Amega.nz"
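For reference, the new count parameter simply caps how many results the ALL-UC API returns per query: 2 with the bundled key, 10 with a user-supplied key, as set in __init__ above. A rough illustration of the URL these hunks build (the API key and title are placeholders):

import urllib, urlparse

base_link = 'https://www.alluc.ee'
count = 10                      # 2 with the bundled key, 10 with a user key
apikey = 'YOUR_API_KEY'         # placeholder
query = urllib.quote_plus('Big Buck Bunny 2008')

search_link = '/api/search/stream/?apikey=%s&query=%s&count=%s' % (apikey, query, str(count))
url = urlparse.urljoin(base_link, search_link)
print url   # https://www.alluc.ee/api/search/stream/?apikey=YOUR_API_KEY&query=Big+Buck+Bunny+2008&count=10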
@@ -401,20 +410,20 @@ def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_o
                 log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
                 return sources

+            processed = []
             for link in url:
-                if re.match('((?!\.part[0-9]).)*$', link['url'], flags=re.IGNORECASE) and '://' in link['url']:
+                if re.match('((?!\.part[0-9]).)*$', link['url'], flags=re.IGNORECASE) and '://' in link['url'] and link['url'] not in processed:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link['url'].strip().lower()).netloc)[0].split('.')[0]
                     scheme = urlparse.urlparse(link['url']).scheme
                     #if host in hostDict and scheme:
                     if scheme:
-                        if '1080' in link["url"] or '1080' in link['url']:
-                            quality = "1080p"
+                        if '1080' in link['title'] or '1080' in link['url']:
+                            quality = '1080p'
                         elif '720' in link['title'] or '720' in link['url']:
                             quality = 'HD'
                         else:
                             quality = 'SD'
-                        #sources.append({ 'source' : host, 'quality' : quality, 'provider': 'alluc', 'url': link['url'] })
-
+
                         file_ext = '.mp4'
                         if len(link['ext']) > 0 and len(link['ext']) < 4 and len(link['src']) > 0:
                             txt = '%s (.%s)' % (link['src'],link['ext'])
@@ -426,7 +435,13 @@ def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_o
                             txt = '%s' % link['src']
                         else:
                             txt = ''
-                        sources = resolvers.createMeta(link['url'], self.name, self.logo, quality, sources, key, lang=link['lang'], txt=txt, file_ext=file_ext, testing=testing)
+
+                        if 'trailer' in link['title'].lower():
+                            sources = resolvers.createMeta(link['url'], self.name, self.logo, quality, sources, key, lang=link['lang'], txt=txt, file_ext=file_ext, vidtype='Trailer', testing=testing)
+                        else:
+                            sources = resolvers.createMeta(link['url'], self.name, self.logo, quality, sources, key, lang=link['lang'], txt=txt, file_ext=file_ext, testing=testing)
+
+                        processed.append(link['url'])

             if self.fetchedtoday > 0:
                 self.msg = 'Fetched today: %s' % str(self.fetchedtoday)
diff --git a/Contents/Libraries/Shared/resources/lib/sources/cyro_ca.py b/Contents/Libraries/Shared/resources/lib/sources/cyro_ca.py
index c6833f7..3bc0cd3 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/cyro_ca.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/cyro_ca.py
@@ -33,7 +33,9 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.base_link = 'http://xpau.se'
         self.MainPageValidatingContent = 'movies'
@@ -55,7 +57,7 @@ def __init__(self):
         self.testparser = 'Unknown'
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
diff --git a/Contents/Libraries/Shared/resources/lib/sources/fmovies_ca.py b/Contents/Libraries/Shared/resources/lib/sources/fmovies_ca.py
index 129cb36..bb6e9a4 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/fmovies_ca.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/fmovies_ca.py
@@ -15,7 +15,9 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.base_link_alts = ['https://www.fmovies.io','https://www4.fmovies.io']
         self.base_link = self.base_link_alts[0]
@@ -41,7 +43,7 @@ def __init__(self):
         self.testparser = 'Unknown'
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
diff --git a/Contents/Libraries/Shared/resources/lib/sources/fmovies_mv_tv.py b/Contents/Libraries/Shared/resources/lib/sources/fmovies_mv_tv.py
index 0b574ee..27fea2e 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/fmovies_mv_tv.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/fmovies_mv_tv.py
@@ -37,11 +37,13 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.disabled = False
         self.TOKEN_KEY = []
-        self.base_link_alts = ['https://bmovies.to'] #['https://fmovies.to','https://fmovies.is','https://fmovies.se']
+        self.base_link_alts = ['https://bmovies.to','https://bmovies.pro'] #['https://fmovies.to','https://fmovies.is','https://fmovies.se']
         self.base_link = self.base_link_alts[0]
         self.grabber_api = "grabber-api/"
         self.search_link = '/sitemap'
@@ -68,7 +70,7 @@ def __init__(self):
         self.testparser = self.testParser()
         self.initAndSleepThread()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
diff --git a/Contents/Libraries/Shared/resources/lib/sources/gogoanime.py b/Contents/Libraries/Shared/resources/lib/sources/gogoanime.py
index f500f4c..284f633 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/gogoanime.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/gogoanime.py
@@ -34,7 +34,9 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.priority = 1
         self.disabled = False
@@ -61,7 +63,7 @@ def __init__(self):
         self.testparser = 'Unknown'
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
@@ -165,7 +167,7 @@ def get_show(self, tvshowtitle, season, imdb=None, tvdb=None, year=None, proxy_o
             return None

         t = cleantitle.get(tvshowtitle)
-
+        year = '%s' % year
         q = urlparse.urljoin(self.base_link, self.search_link)
         q = q % urllib.quote_plus(tvshowtitle)
@@ -220,35 +222,44 @@ def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_o
             url = urlparse.urljoin(self.base_link, url)

             #r = client.request(url)
-            r = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
+            req = proxies.request(url, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)

-            r = client.parseDOM(r, 'iframe', ret='src')
-
+            r = client.parseDOM(req, 'iframe', ret='src')
+            try:
+                r2 = re.findall('data-video=\"(.*?)\"', req)
+                for r2_i in r2:
+                    r.append(r2_i)
+            except:
+                pass
+
             links = []

             for u in r:
                 try:
                     if 'http' not in u:
                         u = 'http:' + u
-                    if not u.startswith('http') and not 'vidstreaming' in u: raise Exception()
-                    #url = client.request(u)
-                    url = proxies.request(u, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
-
-                    url = client.parseDOM(url, 'source', ret='src')
+                    if u.startswith('http') == True:
+                        if 'vidstreaming' in u:
+                            #url = client.request(u)
+                            url = proxies.request(u, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, IPv4=True)
+
+                            url = client.parseDOM(url, 'source', ret='src')
+                        else:
+                            url = [u]

-                    for i in url:
-                        #try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
-                        #except: pass
-
-                        try:
-                            qualityt = client.googletag(i)[0]['quality']
-                        except:
-                            qualityt = u'720p'
-                        try:
-                            links = resolvers.createMeta(i, self.name, self.logo, qualityt, links, key, vidtype='Show', testing=testing)
-                        except:
-                            pass
+                        for i in url:
+                            #try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
+                            #except: pass
+
+                            try:
+                                qualityt = client.googletag(i)[0]['quality']
+                            except:
+                                qualityt = u'720p'
+                            try:
+                                links = resolvers.createMeta(i, self.name, self.logo, qualityt, links, key, vidtype='Show', testing=testing)
+                            except:
+                                pass
                 except:
                     pass
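The gogoanime change above now also harvests data-video attributes alongside iframe src values, so non-vidstreaming embeds are kept as direct links. A minimal standalone illustration of that extraction (the HTML snippet is made up):

import re

html = '<div class="anime_muti_link"><a data-video="//vidstreaming.io/streaming.php?id=abc">Server 1</a></div>'

links = re.findall(r'data-video=\"(.*?)\"', html)
links = ['http:' + u if u.startswith('//') else u for u in links]
print links   # ['http://vidstreaming.io/streaming.php?id=abc']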
diff --git a/Contents/Libraries/Shared/resources/lib/sources/nineanime_ca.py b/Contents/Libraries/Shared/resources/lib/sources/nineanime_ca.py
new file mode 100644
index 0000000..b98338a
--- /dev/null
+++ b/Contents/Libraries/Shared/resources/lib/sources/nineanime_ca.py
@@ -0,0 +1,650 @@
+# -*- coding: utf-8 -*-
+
+'''
+    Specto Add-on
+    Copyright (C) 2015 lambda
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see .
+'''
+
+
+import re,urllib,urlparse,json,random,time,base64
+from resources.lib.libraries import control
+from resources.lib.libraries import cleantitle
+from resources.lib.libraries import client
+from resources.lib.libraries import jsfdecoder
+from resources.lib.libraries import jsunpack
+from resources.lib.libraries import testparams
+from resources.lib.libraries import workers
+from resources.lib import resolvers
+from resources.lib import proxies
+from __builtin__ import ord, format, eval
+
+name = '9anime'
+loggertxt = []
+
+class source:
+    def __init__(self):
+        del loggertxt[:]
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
+        self.init = False
+        self.disabled = False
+        self.TOKEN_KEY = []
+        self.base_link_alts = ['https://9anime.is','https://9anime.to']
+        self.base_link = self.base_link_alts[0]
+        self.grabber_api = "grabber-api/"
+        self.search_link = '/sitemap'
+        self.ALL_JS = "/assets/min/frontend/all.js"
+        self.TOKEN_KEY_PASTEBIN_URL = "https://pastebin.com/raw/VNn1454k"
+        self.hash_link = '/ajax/episode/info'
+        self.hash_menu_link = "/user/ajax/menu-bar"
+        self.token_link = "/token"
+        self.MainPageValidatingContent = ['9anime']
+        self.type_filter = ['anime']
+        self.ssl = False
+        self.name = name
+        self.headers = {}
+        self.cookie = None
+        self.loggertxt = []
+        self.logo = 'https://i.imgur.com/6PsTdOZ.png'
+        self.speedtest = 0
+        if len(proxies.sourceProxies)==0:
+            proxies.init()
+        self.proxyrequired = False
+        self.msg = ''
+        self.siteonline = self.testSite()
+        self.testparser = 'Unknown'
+        self.testparser = self.testParser()
+        self.initAndSleepThread()
+        self.init = True
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))
+
+    def info(self):
+        return {
+            'url': self.base_link,
+            'name': self.name,
+            'msg' : self.msg,
+            'speed': round(self.speedtest,3),
+            'logo': self.logo,
+            'ssl' : self.ssl,
+            'online': self.siteonline,
+            'online_via_proxy' : self.proxyrequired,
+            'parser': self.testparser
+        }
+
+    def getLog(self):
+        self.loggertxt = loggertxt
+        return self.loggertxt
+
+    def testSite(self):
+        for site in self.base_link_alts:
+            bool = self.testSiteAlts(site)
+            if bool == True:
+                return bool
+
+        self.base_link = self.base_link_alts[0]
+        return False
+
+    def testSiteAlts(self, site):
+        try:
+            self.base_link = site
+            if self.disabled:
+                log('INFO','testSite', 'Plugin Disabled')
+                return False
+            self.initAndSleep()
+            x1 = time.time()
+            http_res, content = proxies.request(url=site, headers=self.headers, output='response', use_web_proxy=False, httpsskip=True)
+            self.speedtest = time.time() - x1
+            for valcon in self.MainPageValidatingContent:
+                if content != None and content.find(valcon) >-1:
+                    log('SUCCESS', 'testSite', 'HTTP Resp : %s for %s' % (http_res,site))
+                    return True
+            log('FAIL', 'testSite', 'Validation content Not Found. HTTP Resp : %s for %s' % (http_res,site))
+            return False
+        except Exception as e:
+            log('ERROR','testSite', '%s' % e)
+            return False
+
+    def initAndSleepThread(self):
+        thread_i = workers.Thread(self.InitSleepThread)
+        thread_i.start()
+
+    def InitSleepThread(self):
+        while True:
+            time.sleep(60*60) # 1 hr
+            self.initAndSleep()
+
+    def initAndSleep(self):
+        try:
+            self.TOKEN_KEY = []
+            self.getVidToken()
+            if len(self.TOKEN_KEY) > 0:
+                log('SUCCESS', 'initAndSleep', 'Vid Token: %s' % client.b64encode(self.TOKEN_KEY[0]))
+            else:
+                log('FAIL', 'initAndSleep', 'Vid Token Not retrieved !')
+
+            t_base_link = self.base_link
+            self.headers = {'X-Requested-With': 'XMLHttpRequest'}
+            self.headers['Referer'] = t_base_link
+            ua = client.randomagent()
+            self.headers['User-Agent'] = ua
+
+            #get cf cookie
+            cookie1 = proxies.request(url=t_base_link, headers=self.headers, output='cookie', use_web_proxy=self.proxyrequired, httpsskip=True)
+            self.headers['Cookie'] = cookie1
+
+            # get reqkey cookie
+            try:
+                token_url = urlparse.urljoin(t_base_link, self.token_link)
+                r1 = proxies.request(token_url, headers=self.headers, httpsskip=True)
+                reqkey = self.decodeJSFCookie(r1)
+            except:
+                reqkey = ''
+
+            # get session cookie
+            serverts = str(((int(time.time())/3600)*3600))
+            query = {'ts': serverts}
+            try:
+                tk = self.__get_token(query)
+            except:
+                tk = self.__get_token(query, True)
+
+            query.update(tk)
+            hash_url = urlparse.urljoin(t_base_link, self.hash_menu_link)
+            hash_url = hash_url + '?' + urllib.urlencode(query)
+
+            r1, headers, content, cookie2 = proxies.request(hash_url, headers=self.headers, limit='0', output='extended', httpsskip=True)
+
+            #cookie = cookie1 + '; ' + cookie2 + '; user-info=null; reqkey=' + reqkey
+            cookie = '%s; %s; user-info=null; reqkey=%s' % (cookie1 , cookie2 , reqkey)
+
+            self.headers['Cookie'] = cookie
+            log('SUCCESS', 'initAndSleep', 'Cookies : %s for %s' % (cookie,self.base_link))
+        except Exception as e:
+            log('ERROR','initAndSleep', '%s' % e)
+
+    def testParser(self):
+        try:
+            if self.disabled == True:
+                log('INFO','testParser', 'Plugin Disabled - cannot test parser')
+                return False
+            if self.siteonline == False:
+                log('INFO', 'testParser', '%s is offline - cannot test parser' % self.base_link)
+                return False
+
+            for show in testparams.test_shows:
+                geturl = self.get_show(tvshowtitle=show['title'], season=show['season'], year=show['year'])
+                geturl = self.get_episode(geturl, season=show['season'], episode=show['episode'])
+                links = self.get_sources(url=geturl, testing=True)
+
+                if links != None and len(links) > 0:
+                    log('SUCCESS', 'testParser', 'Parser is working')
+                    return True
+
+            log('FAIL', 'testParser', 'Parser NOT working')
+            return False
+        except Exception as e:
+            log('ERROR', 'testParser', '%s' % e)
+            return False
+
+    def get_movie(self, imdb, title, year, proxy_options=None, key=None):
+        try:
+            if control.setting('Provider-%s' % name) == False:
+                log('INFO','get_movie','Provider Disabled by User')
+                return None
+            url = {'imdb': imdb, 'title': title, 'year': year}
+            url = urllib.urlencode(url)
+            #X - Requested - With:"XMLHttpRequest"
+            return url
+        except Exception as e:
+            log('ERROR', 'get_movie','%s: %s' % (title,e), dolog=self.init)
+            return
+
+    def get_show(self, imdb=None, tvdb=None, tvshowtitle=None, year=None, season=None, proxy_options=None, key=None):
+        try:
+            if control.setting('Provider-%s' % name) == False:
+                log('INFO','get_show','Provider Disabled by User')
+                return None
+            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'season': season}
+            url = urllib.urlencode(url)
+            return url
+        except Exception as e:
+            log('ERROR', 'get_show','%s: %s' % (tvshowtitle,e), dolog=self.init)
+            return
+
+    def get_episode(self, url=None, imdb=None, tvdb=None, title=None, year=None, season=None, episode=None, proxy_options=None, key=None):
+        try:
+            if control.setting('Provider-%s' % name) == False:
+                return None
+            if url == None: return
+            url = urlparse.parse_qs(url)
+            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
+            url['title'], url['season'], url['episode'], url['premiered'] = title, season, episode, year
+            url = urllib.urlencode(url)
+            return url
+        except Exception as e:
+            log('ERROR', 'get_episode','%s: %s' % (title,e), dolog=self.init)
+            return
+
+    def get_sources(self, url, hosthdDict=None, hostDict=None, locDict=None, proxy_options=None, key=None, testing=False):
+        try:
+            sources = []
+            if url == None:
+                log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key), dolog=not testing)
+                return sources
+
+            myts = str(((int(time.time())/3600)*3600))
+            log('INFO','get_sources-1', 'url: %s' % url, dolog=False)
+            token_error = False
+            urls = []
+
+            if not str(url).startswith('http'):
+                try:
+                    data = urlparse.parse_qs(url)
+                    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
+
+                    title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
+
+                    try:
+                        year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
+                    except:
+                        try:
+                            year = data['year']
+                        except:
+                            year = None
+                    try: episode = data['episode']
+                    except: pass
+
+                    query = {'keyword': title}
+                    search_url = urlparse.urljoin(self.base_link, '/search')
+                    search_url = search_url + '?' + urllib.urlencode(query)
+                    result = proxies.request(search_url, headers=self.headers, proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
+
+                    log('INFO','get_sources-2', '%s' % search_url, dolog=False)
+
+                    rs = client.parseDOM(result, 'div', attrs = {'class': '[^"]*row[^"]*'})[0]
+                    r = client.parseDOM(rs, 'div', attrs = {'class': 'item'})
+                    r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
+                    r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
+                    r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]
+
+                    if 'season' in data:
+                        r = [(i[0], re.sub(' \(\w*\)', '', i[1])) for i in r]
+
+                        possible_hits = []
+                        for i in r:
+                            if cleantitle.get(title).lower() == cleantitle.get(i[1]).lower():
+                                possible_hits.append((i[0], [[i[1], u'1']]))
+
+                        #title += '%01d' % int(data['season'])
+                        url = [(i[0], re.findall('(.+?) (\d+)$', i[1])) for i in r]
+
+                        for i in possible_hits:
+                            url.append(i)
+
+                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
+
+                        url = [i for i in url if cleantitle.get(title) in cleantitle.get(i[1])]
+
+                        url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
+
+                        if len(url) == 0:
+                            url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
+                        if len(url) == 0:
+                            url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]+str(season))]
+                    else:
+                        url = [i for i in r if cleantitle.get(title) in cleantitle.get(i[1])]
+
+                    if len(url) == 0:
+                        log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
+                        return sources
+
+                    for urli in url:
+                        url = urli[0]
+                        url = urlparse.urljoin(self.base_link, url)
+                        urls.append(url)
+
+                except Exception as e:
+                    raise Exception(e)
+
+            for url in urls:
+                try:
+                    try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
+                    except: pass
+
+                    log('INFO','get_sources-3', url, dolog=False)
+
+                    referer = url
+                    result = resultT = proxies.request(url, headers=self.headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
+
+                    alina = client.parseDOM(result, 'title')[0]
+
+                    atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]
+                    if 'season' in data:
+                        years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
+                        mychk = False
+                        for y in years:
+                            if y in atr:
+                                mychk = True
+                        result = result if mychk == True else None
+                        if mychk == True:
+                            break
+                    else:
+                        result = result if year in atr else None
+
+                    if result != None:
+                        break
+                except Exception as e:
+                    log('FAIL','get_sources-3', '%s : %s' % (url,e), dolog=False)
+
+            if result == None:
+                log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
+                return sources
+
+            try:
+                myts = re.findall(r'data-ts="(.*?)"', result)[0]
+            except:
+                log('INFO','get_sources-3', 'could not parse ts ! will use generated one : %s' % myts, dolog=False)
+
+            trailers = []
+            links_m = []
+
+            if testing == False:
+                try:
+                    matches = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+').findall(result)
+                    for match in matches:
+                        try:
+                            if 'youtube.com' in match:
+                                match = match.replace('embed/','watch?v=')
+                                trailers.append(match)
+                        except:
+                            pass
+                except Exception as e:
+                    pass
+
+                for trailer in trailers:
+                    links_m = resolvers.createMeta(trailer, self.name, self.logo, '720p', links_m, key, vidtype='Trailer', testing=testing)
+
+            riptype = None
+            try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
+            except: quality = 'hd'
+            if quality == 'cam' or quality == 'ts':
+                quality = '480p'
+                riptype = 'CAM'
+            elif quality == 'hd' or 'hd ' in quality:
+                quality = '720p'
+                riptype = 'BRRIP'
+            else:
+                quality = '480p'
+                riptype = 'BRRIP'
+
+            result = client.parseDOM(result, 'ul', attrs = {'data-range-id':"0"})
+
+            servers = []
+            #servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'})
+            servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a'))
+
+            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
+            servers = [(i[0], ''.join(i[1][:1])) for i in servers]
+
+            try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
+            except: pass
+
+            for s in servers[:len(servers)]:
+                try:
+
+                    headers = {'X-Requested-With': 'XMLHttpRequest'}
+                    hash_url = urlparse.urljoin(self.base_link, self.hash_link)
+                    query = {'ts': myts, 'id': s[0], 'update': '0', 'server':'36'}
+
+                    query.update(self.__get_token(query))
+                    hash_url = hash_url + '?' + urllib.urlencode(query)
+                    headers['Referer'] = urlparse.urljoin(url, s[0])
+                    headers['Cookie'] = self.headers['Cookie']
+
+                    log('INFO','get_sources-4', '%s' % hash_url, dolog=False)
+                    result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
+                    result = json.loads(result)
+
+                    if 'error' in result and result['error'] == True:
+                        token_error = True
+                        query.update(self.__get_token(query, token_error=token_error))
+                        hash_url = hash_url + '?' + urllib.urlencode(query)
+                        result = proxies.request(hash_url, headers=headers, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
+                        result = json.loads(result)
+
+                        query = {'id': s[0], 'update': '0'}
+                        query.update(self.__get_token(query, token_error=token_error))
+                    else:
+                        token_error = False
+                        queryx = {'id': s[0], 'update': '0'}
+                        query.update(self.__get_token(queryx))
+
+                    url = url + '?' + urllib.urlencode(query)
+                    #result = client2.http_get(url, headers=headers)
+
+                    #quality = '360p'
+                    if '1080' in s[1]:
+                        quality = '1080p'
+                        #riptype = 'BRRIP'
+                    elif '720' in s[1] or 'hd' in s[1].lower():
+                        quality = '720p'
+                        #riptype = 'BRRIP'
+                    elif '480' in s[1]:
+                        quality = '480p'
+                        #riptype = 'BRRIP'
+                    elif 'cam' in s[1].lower() or 'ts' in s[1].lower():
+                        quality = '480p'
+                        #riptype = 'CAM'
+                    else:
+                        quality = '480p'
+                        #riptype = 'CAM'
+
+                    log('INFO','get_sources-5', result, dolog=False)
+
+                    if result['target'] != "":
+                        pass
+                    else:
+                        grabber = result['grabber']
+                        grab_data = grabber
+                        grabber_url = urlparse.urljoin(self.base_link, self.grabber_api)
+
+                        if '?' in grabber:
+                            grab_data = grab_data.split('?')
+                            grabber_url = grab_data[0]
+                            grab_data = grab_data[1]
+
+                        print grab_data
+                        grab_server = str(urlparse.parse_qs(grab_data)['server'][0])
+
+                        b, resp = self.decode_t(result['params']['token'], -18)
+                        if b == False:
+                            raise Exception(resp)
+                        token = resp
+                        b, resp = self.decode_t(result['params']['options'], -18)
+                        if b == False:
+                            raise Exception(resp)
+                        options = resp
+
+                        grab_query = {'ts':myts, grabber_url:'','id':result['params']['id'],'server':grab_server,'mobile':'0','token':token,'options':options}
+                        tk = self.__get_token(grab_query, token_error)
+
+                        if tk == None:
+                            raise Exception('video token algo')
+                        grab_info = {'token':token,'options':options}
+                        del query['server']
+                        query.update(grab_info)
+                        query.update(tk)
+
+                        sub_url = result['subtitle']
+                        if sub_url==None or len(sub_url) == 0:
+                            sub_url = None
+
+                        if '?' in grabber:
+                            grabber += '&' + urllib.urlencode(query)
+                        else:
+                            grabber += '?' + urllib.urlencode(query)
+
+                        if grabber!=None and not grabber.startswith('http'):
+                            grabber = 'http:'+grabber
+
+                        log('INFO','get_sources-6', grabber, dolog=False)
+
+                        result = proxies.request(grabber, headers=headers, referer=url, limit='0', proxy_options=proxy_options, use_web_proxy=self.proxyrequired, httpsskip=True)
+
+                        result = json.loads(result)
+
+                    if 'data' in result.keys():
+                        result = [i['file'] for i in result['data'] if 'file' in i]
+
+                        for i in result:
+                            links_m = resolvers.createMeta(i, self.name, self.logo, quality, links_m, key, riptype, sub_url=sub_url, testing=testing)
+                    else:
+                        target = result['target']
+                        # b, resp = self.decode_t(target, -18)
+                        # if b == False:
+                        #     raise Exception(resp)
+                        # target = resp
+                        sub_url = result['subtitle']
+                        if sub_url==None or len(sub_url) == 0:
+                            sub_url = None
+
+                        if target!=None and not target.startswith('http'):
+                            target = 'http:' + target
+
+                        links_m = resolvers.createMeta(target, self.name, self.logo, quality, links_m, key, riptype, sub_url=sub_url, testing=testing)
+
+                except Exception as e:
+                    log('FAIL', 'get_sources-7','%s' % e, dolog=False)
+
+            sources += [l for l in links_m]
+
+            if len(sources) == 0:
+                log('FAIL','get_sources','Could not find a matching title: %s' % cleantitle.title_from_key(key))
+                return sources
+
+            log('SUCCESS', 'get_sources','%s sources : %s' % (cleantitle.title_from_key(key), len(sources)), dolog=not testing)
+            return sources
+        except Exception as e:
+            log('ERROR', 'get_sources', '%s' % e, dolog=not testing)
+            return sources
+
+    def resolve(self, url):
+        try:
+            return url
+        except:
+            return
+
+    def r01(self, t, e, token_error=False):
+        i = 0
+        n = 0
+        for i in range(0, max(len(t), len(e))):
+            if i < len(e):
+                n += ord(e[i])
+            if i < len(t):
+                n += ord(t[i])
+        h = format(int(hex(n),16),'x')
+        return h
+
+    def a01(self, t, token_error=False):
+        i = 0
+        for e in range(0, len(t)):
+            if token_error == False:
+                i += ord(t[e]) * e
+            else:
+                i += ord(t[e]) + e
+        return i
+
+    def decode_t(self, t, i):
+        n = []
+        e = []
+        r = ''
+        try:
+            if t[0] == '.':
+                for n in range(0, len(t)):
+                    if n == 0 and t[n] == '.':
+                        pass
+                    else:
+                        c = ord(t[n])
+                        if c >= 97 and c <= 122:
+                            e.append((c - 71 + i) % 26 + 97)
+                        elif c >= 65 and c <= 90:
+                            e.append((c - 39 + i) % 26 + 65)
+                        else:
+                            e.append(c)
+            for ee in e:
+                r += chr(ee)
+            return True, r
+        except Exception as e:
+            log('ERROR', 'decode_t','%s' % e, dolog=False)
+            return False, 'Error in decoding'
+
+    def __get_token(self, n, token_error=False):
+        try:
+            d = self.TOKEN_KEY[0]
+            s = self.a01(d, token_error)
+            for i in n:
+                s += self.a01(self.r01(d + i, n[i]), token_error)
+            return {'_': str(s)}
+        except Exception as e:
+            log('ERROR', '__get_token','%s' % e, dolog=False)
+
+    def decodeJSFCookie(self, token):
+        dec = jsfdecoder.JSFDecoder(token).ca_decode()
+        dec = dec.split('reqkey=')
+        dec = dec[1].split(';')
+        dec = dec[0]
+        return dec
+
+    def getVidToken(self):
+        try:
+            all_js_url = urlparse.urljoin(self.base_link, self.ALL_JS)
+            unpacked_code = ''
+            cch = ''
+            if len(self.TOKEN_KEY) == 0:
+                all_js_pack_code = proxies.request(all_js_url, use_web_proxy=self.proxyrequired, httpsskip=True)
+                unpacked_code = jsunpack.unpack(all_js_pack_code)
+                cch = re.findall(r'%s' % client.b64decode('ZnVuY3Rpb25cKFthLXpdLFthLXpdLFthLXpdXCl7XCJ1c2Ugc3RyaWN0XCI7ZnVuY3Rpb24gW2Etel1cKFwpe3JldHVybiAoLio/KX1mdW5jdGlvbiBbYS16XVwoW2Etel1cKQ=='), unpacked_code)[0]
+                token_key = re.findall(r'%s=.*?\"(.*?)\"' % cch, unpacked_code)[0]
+                if token_key !=None and token_key != '':
+                    self.TOKEN_KEY.append(token_key)
+                    control.set_setting(name+'VidToken', token_key)
+        except Exception as e:
+            log('ERROR', 'getVidToken-1','%s' % e, dolog=False)
+            log('ERROR', 'getVidToken-1','%s' % unpacked_code, dolog=False)
+            log('ERROR', 'getVidToken-1','%s' % cch, dolog=False)
+
+        try:
+            if len(self.TOKEN_KEY) == 0:
+                token_key = proxies.request(self.TOKEN_KEY_PASTEBIN_URL, use_web_proxy=self.proxyrequired, httpsskip=True)
+                if token_key !=None and token_key != '':
+                    #cookie_dict.update({'token_key':token_key})
+                    self.TOKEN_KEY.append(token_key)
+                    control.set_setting(name+'VidToken', token_key)
+        except Exception as e:
+            log('ERROR', 'getVidToken-2','%s' % e, dolog=False)
+
+def log(type='INFO', method='undefined', err='', dolog=True, logToControl=False, doPrint=True):
+    try:
+        msg = '%s: %s > %s > %s : %s' % (time.ctime(time.time()), type, name, method, err)
+        if dolog == True:
+            loggertxt.append(msg)
+            if logToControl == True:
+                control.log(msg)
+            if control.doPrint == True and doPrint == True:
+                print msg
+    except Exception as e:
+        control.log('Error in Logging: %s >>> %s' % (msg,e))
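A note on the 9anime token scheme in the new provider above: the _ value appended to each AJAX call is just a checksum over the query parameters, seeded by a key scraped from the site's all.js (with a Pastebin fallback). The following standalone restatement of the same arithmetic may help when debugging token mismatches — the key is a made-up placeholder, and only the non-error variant of a01 is shown:

def a01(t):
    # sum of ord(char) * index over the string
    return sum(ord(c) * i for i, c in enumerate(t))

def r01(t, e):
    # hex digest of the summed character codes of both strings
    n = sum(ord(c) for c in t) + sum(ord(c) for c in e)
    return format(n, 'x')

def get_token(params, key='c2VjcmV0'):   # placeholder key, not the real one
    s = a01(key)
    for pname in params:
        s += a01(r01(key + pname, params[pname]))
    return {'_': str(s)}

print get_token({'ts': '1510531200', 'id': 'abc123'})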
diff --git a/Contents/Libraries/Shared/resources/lib/sources/primewire_mv_tv.py b/Contents/Libraries/Shared/resources/lib/sources/primewire_mv_tv.py
index 2e3de07..66b0766 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/primewire_mv_tv.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/primewire_mv_tv.py
@@ -35,7 +35,9 @@ class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.base_link_alts = ['http://www.primewire.ag','http://www.primewire.is','http://www.primewire.org']
         self.base_link = self.base_link_alts[0]
@@ -59,7 +61,7 @@ def __init__(self):
         self.testparser = 'Unknown'
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
diff --git a/Contents/Libraries/Shared/resources/lib/sources/yesmovies_mv_tv.py b/Contents/Libraries/Shared/resources/lib/sources/yesmovies_mv_tv.py
index 8932477..b1faa06 100644
--- a/Contents/Libraries/Shared/resources/lib/sources/yesmovies_mv_tv.py
+++ b/Contents/Libraries/Shared/resources/lib/sources/yesmovies_mv_tv.py
@@ -58,7 +58,9 @@ def my_add(x, y):
 class source:
     def __init__(self):
         del loggertxt[:]
-        log(type='INFO', method='init', err=' -- Initializing %s Start --' % name)
+        self.ver = '0.0.1'
+        self.update_date = 'Nov. 13, 2017'
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
         self.init = False
         self.base_link = 'https://yesmovies.to'
         self.MainPageValidatingContent = 'Yesmovies - Watch FREE Movies Online & TV shows'
@@ -84,7 +86,7 @@ def __init__(self):
         self.testparser = 'Unknown'
         self.testparser = self.testParser()
         self.init = True
-        log(type='INFO', method='init', err=' -- Initializing %s End --' % name)
+        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))

     def info(self):
         return {
diff --git a/Contents/Services/Shared Code/misc.pys b/Contents/Services/Shared Code/misc.pys
index ea1a5aa..e206f11 100644
--- a/Contents/Services/Shared Code/misc.pys
+++ b/Contents/Services/Shared Code/misc.pys
@@ -51,7 +51,7 @@ IP_OVERIDE = True
 RE_SUB1 = Regex(r'(?m)(^[^\#])')
 RE_SOURCES = Regex(r'(?m)(^.+?\d+/(\d+).+$)')

-supported_hosts = ['mycloud.to','mcloud.to']
+supported_hosts = ['mycloud.to','mcloud.to','rapidvideo.com']

 def resolve(url, https_skip, test=False, strip_url=True):
     video_urlf = None
@@ -64,72 +64,90 @@ def resolve(url, https_skip, test=False, strip_url=True):

     try:
         myParams = {}
-        myheaders = {}
-        myheaders['User-Agent'] = 'Mozilla'
-        myheaders['Referer'] = 'http://mycloud.to'
-        myParams['headers'] = myheaders

-        if strip_url == True and 'mycloud.' in url or 'mcloud.' in url:
+        #Log(page_data_string)
+        video_url_a = []
+
+        if 'rapidvideo.' in ourl:
+            myheaders = {}
+            myheaders['User-Agent'] = 'Mozilla'
+            myheaders['Referer'] = ourl
+            myParams['headers'] = myheaders
+            page_data_string = request(ourl, headers=myheaders, httpsskip=True)
+            page_data_elems = HTML.ElementFromString(page_data_string)
             try:
-                if '?' in ourl:
-                    data = urlparse.parse_qs(ourl.split('?')[1])
-                    kurl = ourl.split('?')[0]
-                else:
-                    kurl = ourl
-                    data = {'a.url':'https%3A%2F%2Ffmovies.to%2Facode%2Fplayer.html','a.close':0,'ui':['ZwrN0oNRXfRP686L3Z3BPkXInMs']}
+                video_url = page_data_elems.xpath(".//div[@id='home_video']//source/@src")[0]
             except:
-                data.update({'a.url':'https%3A%2F%2Ffmovies.to%2Facode%2Fplayer.html','a.close':0,'ui':['ZwrN0oNRXfRP686L3Z3BPkXInMs']})
-                kurl = ourl
-
-            #Log(data)
-            url = kurl + '?ui=%s' % data['ui'][0].replace('=','')
-            if 'https:' not in url and 'http:' not in url:
-                url = 'http:' + url
-
-            Log(url)
+                video_url = re.findall(r'src=\".*(http.*?mp4)\"',page_data_string)[0]
+            res = '720'
+            try:
+                res = page_data_elems.xpath(".//div[@id='home_video']//source/@data-res")[0]
+            except:
+                pass
+            f_i = {'file':video_url, 'label':res}
+            video_url_a.append(f_i)
+            video_urlf = video_url_a
+        elif 'mycloud.' in ourl or 'mcloud.' in ourl:
+            #Log('============ MyCloud URL ==================')
+            myheaders = {}
+            myheaders['User-Agent'] = 'Mozilla'
+            myheaders['Referer'] = 'http://mcloud.to'
+            myParams['headers'] = myheaders
+
+            if strip_url == True and 'mycloud.' in url or 'mcloud.' in url:
+                try:
+                    if '?' in ourl:
+                        data = urlparse.parse_qs(ourl.split('?')[1])
+                        kurl = ourl.split('?')[0]
+                    else:
+                        kurl = ourl
+                        data = {'a.url':'https%3A%2F%2Ffmovies.to%2Facode%2Fplayer.html','a.close':0,'ui':['ZwrN0oNRXfRP686L3Z3BPkXInMs']}
+                except:
+                    data.update({'a.url':'https%3A%2F%2Ffmovies.to%2Facode%2Fplayer.html','a.close':0,'ui':['ZwrN0oNRXfRP686L3Z3BPkXInMs']})
+                    kurl = ourl

-        page_data_string = request(url, headers=myheaders, httpsskip=True)
-        #Log(page_data_string)
-
-        if page_data_string != None:
-            if 'mycloud.' in ourl or 'mcloud.' in ourl:
-                #Log('============ MyCloud URL ==================')
+                #Log(data)
+                url = kurl + '?ui=%s' % data['ui'][0].replace('=','')
+                if 'https:' not in url and 'http:' not in url:
+                    url = 'http:' + url
+
+            page_data_string = request(url, headers=myheaders, httpsskip=True)
+
+            if 'Sorry, the page you are looking for could not be found.' not in page_data_string and 'This video is in processing' not in page_data_string:
+                json_data_str = re.findall(r'({\".*(.*m3u8|.mp4|.flv).*\"})', page_data_string)[0][0]
+                json_data = json.loads(json_data_str)
+                video_url = json_data['file']
+                if 'https:' not in video_url and 'http:' not in video_url:
+                    video_url = 'http:' + video_url

-                if 'Sorry, the page you are looking for could not be found.' not in page_data_string and 'This video is in processing' not in page_data_string:
-                    json_data_str = re.findall(r'({\".*(.*m3u8|.mp4|.flv).*\"})', page_data_string)[0][0]
-                    json_data = json.loads(json_data_str)
-                    video_url = json_data['file']
-                    if 'https:' not in video_url and 'http:' not in video_url:
-                        video_url = 'http:' + video_url
-
+                #Log('=======video_urls_data========')
+                video_urls_data = request(video_url, headers=myheaders, httpsskip=True)
+
+                if video_urls_data == None or video_urls_data == '':
+                    raise ValueError('M3U8 URL is empty')
+                #Log('video_urls_data ------------> %s' % video_urls_data)
+
+                try:
+                    video_urls_arr = re.findall(r'(hls.*)', video_urls_data)
+                    #Log('video_urls_arr ------------> %s' % video_urls_arr)
+                    if len(video_urls_arr) == 0:
+                        raise
                     #Log('=======video_urls_data========')
-                    video_urls_data = request(video_url, headers=myheaders, httpsskip=True)
-
-                    if video_urls_data == None or video_urls_data == '':
-                        raise ValueError('M3U8 URL is empty')
-                    #Log('video_urls_data ------------> %s' % video_urls_data)
-
-                    video_url_a = []
-                    try:
-                        video_urls_arr = re.findall(r'(hls.*)', video_urls_data)
-                        #Log('video_urls_arr ------------> %s' % video_urls_arr)
-                        if len(video_urls_arr) == 0:
-                            raise
-                        #Log('=======video_urls_data========')
-                        for v in video_urls_arr:
-                            f_i = {'file':video_url.split('list.m3u8')[0] + v, 'label':v.split('/')[1]}
-                            video_url_a.append(f_i)
-                            #Log(f_i)
-                    except:
-                        f_i = {'file':video_url, 'label':'720'}
+                    for v in video_urls_arr:
+                        str_url = video_url.split('list.m3u8')[0] + v
+                        #str_url = video_url
+                        f_i = {'file':str_url, 'label':v.split('/')[1]}
                         video_url_a.append(f_i)
-                    video_urlf = video_url_a
-                    #Log('video_urlf ------------> %s' % video_urlf)
-
-                    if test == True:
-                        Log('*** Testing MyCloud ***')
-                        vurls = mycloud_streams(video_urlf[len(video_urlf)-1], https_skip, myheaders)
+                except:
+                    f_i = {'file':video_url, 'label':'720'}
+                    video_url_a.append(f_i)
+                video_urlf = video_url_a
+                Log('video_urlf ------------> %s' % video_urlf)
+                if test == True:
+                    Log('*** Testing MyCloud ***')
+                    vurls = mycloud_streams(video_urlf[len(video_urlf)-1], https_skip, myheaders)
+
     except Exception as e:
         Log('Misc.pys > resolve > Error : %s' % e)
         pass
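The mycloud branch above turns a master list.m3u8 into one entry per quality by scanning the playlist for hls/<res>/ variant paths. A toy version of that parsing, with an invented playlist and URL:

import re

master_url = 'https://mcloud.to/stream/abc/list.m3u8'   # made-up URL
playlist = '\n'.join([
    '#EXTM3U',
    '#EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360',
    'hls/360/index.m3u8',
    '#EXT-X-STREAM-INF:BANDWIDTH=2560000,RESOLUTION=1280x720',
    'hls/720/index.m3u8',
])

variants = []
for v in re.findall(r'(hls.*)', playlist):
    variants.append({'file': master_url.split('list.m3u8')[0] + v, 'label': v.split('/')[1]})

print variants
# [{'file': '.../hls/360/index.m3u8', 'label': '360'}, {'file': '.../hls/720/index.m3u8', 'label': '720'}]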
diff --git a/Contents/Services/URL/FMovies/ServiceCode.pys b/Contents/Services/URL/FMovies/ServiceCode.pys
index 4261fd2..4a88ae8 100644
--- a/Contents/Services/URL/FMovies/ServiceCode.pys
+++ b/Contents/Services/URL/FMovies/ServiceCode.pys
@@ -127,6 +127,11 @@ def MediaObjectsForURL(url, **kwargs):
     pairrequired = False
     direct_play = True

+    try:
+        direct_play = not data['force_transcode']
+    except:
+        pass
+
     try:
         openloadApiKey = data['control_openload_api_key']
     except:
@@ -164,7 +169,13 @@ def MediaObjectsForURL(url, **kwargs):
             files = [{'file':ourl,'label': res}]

         if 'openload' not in host:
-            furl, params, direct_play = host_misc_resolvers.resolve(url=ourl, https_skip=https_skip)
+            furl, params, direct_play_file = host_misc_resolvers.resolve(url=ourl, https_skip=https_skip)
+
+            if direct_play == False:
+                pass
+            else:
+                direct_play = direct_play_file
+
             #Log("furl -------- %s" % furl)
             #Log("params --- %s" % json.loads(base64.b64decode(params)))

@@ -182,16 +193,15 @@ def MediaObjectsForURL(url, **kwargs):
         for file in files:
             furl = file['file']
             res = int(file['label'])
+            #Log("---------furl-------")
+            #Log("%s" % furl)
+
             if '.m3u8' in furl:
-                #Log("---------furl-------")
-                #Log("%s" % furl)
-
                 mo = MediaObject(
                         protocol = 'hls' if direct_play==True else None,
                         container = 'mpegts' if direct_play==True else None,
                         audio_codec = AudioCodec.AAC if direct_play==True else None,
-                        video_resolution = res,
-                        audio_channels = 2,
+                        video_resolution = res if direct_play else None,
                         optimized_for_streaming = direct_play if direct_play==True else False,
                         parts = [PartObject(key=Callback(PlayVideo, url=furl, refUrl=ourl, isTargetPlay=True, pairrequired=pairrequired, https_skip=https_skip, params=params, host=host, openloadApiKey=openloadApiKey))]
                     )
@@ -200,8 +210,7 @@ def MediaObjectsForURL(url, **kwargs):
                         container = Container.MP4 if direct_play==True else None,
                         video_codec = VideoCodec.H264 if direct_play==True else None,
                         audio_codec = AudioCodec.AAC if direct_play==True else None,
-                        video_resolution = res,
-                        audio_channels = 2,
+                        video_resolution = res if direct_play else None,
                         optimized_for_streaming = direct_play if direct_play==True else False,
                         parts = [PartObject(key=Callback(PlayVideo, url=furl, isTargetPlay=True, pairrequired=pairrequired, https_skip=https_skip, params=params, host=host, openloadApiKey=openloadApiKey))]
                     )
@@ -260,21 +269,19 @@ def MediaObjectsForURL(url, **kwargs):

             if type == 'flv' or '.flv' in furl:
                 mo = MediaObject(
-                        container = Container.FLV,
-                        video_codec = VideoCodec.H264,
-                        audio_codec = AudioCodec.AAC,
-                        video_resolution = res,
-                        audio_channels = 2,
+                        container = Container.FLV if direct_play else None,
+                        video_codec = VideoCodec.H264 if direct_play else None,
+                        audio_codec = AudioCodec.AAC if direct_play else None,
+                        video_resolution = res if direct_play else None,
                         optimized_for_streaming = direct_play,
                         parts = [PartObject(key=Callback(PlayVideo, url=furl, useRedirect=useRedirect, refUrl=redUrl, https_skip=https_skip, params=params, host=host))]
                     )
             elif type == '.m3u8' or '.m3u8' in furl:
                 mo = MediaObject(
-                        protocol = 'hls',
-                        container = 'mpegts',
-                        audio_codec = AudioCodec.AAC,
-                        video_resolution = res,
-                        audio_channels = 2,
+                        protocol = 'hls' if direct_play else None,
+                        container = 'mpegts' if direct_play else None,
+                        audio_codec = AudioCodec.AAC if direct_play else None,
+                        video_resolution = res if direct_play else None,
                         optimized_for_streaming = direct_play,
                         parts = [PartObject(key=Callback(PlayVideo, url=furl, useRedirect=useRedirect, refUrl=redUrl, https_skip=https_skip, params=params, host=host))]
                     )
@@ -283,8 +290,7 @@ def MediaObjectsForURL(url, **kwargs):
                         container = Container.MP4 if direct_play else None,
                         video_codec = VideoCodec.H264 if direct_play else None,
                         audio_codec = AudioCodec.AAC if direct_play else None,
-                        video_resolution = res,
-                        audio_channels = 2,
+                        video_resolution = res if direct_play else None,
                         optimized_for_streaming = direct_play,
                         parts = [PartObject(key=Callback(PlayVideo, url=furl, useRedirect=useRedirect, refUrl=redUrl, https_skip=https_skip, params=params, host=host))]
                     )
@@ -339,7 +345,7 @@ def PlayVideo(url, isTargetPlay=False, useRedirect=False, refUrl=None, params=No

     if 'blogspot.com' in url:
         time.sleep(3)
-
+
     return IndirectResponse(VideoClipObject, key=url, http_headers=http_headers, post_headers=http_headers, http_cookies=http_cookies)

####################################################################################################
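For context on the Force Transcoding device option wired in above: when force_transcode is set, direct_play is driven False, which blanks every codec/container hint on the MediaObject so the Plex server transcodes instead of direct-playing. A framework-free sketch of that decision (plain Python; names are illustrative, the real code uses Plex's MediaObject):

def media_hints(res, direct_play):
    # With direct_play False every hint is None, which tells the media
    # server it cannot direct-play the stream and must transcode it.
    return {
        'container': 'mp4' if direct_play else None,
        'video_codec': 'h264' if direct_play else None,
        'audio_codec': 'aac' if direct_play else None,
        'video_resolution': res if direct_play else None,
        'optimized_for_streaming': bool(direct_play),
    }

force_transcode = True             # the new Device Option
direct_play = not force_transcode
print media_hints(720, direct_play)
# {'container': None, 'video_codec': None, 'audio_codec': None, 'video_resolution': None, 'optimized_for_streaming': False}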