diff --git a/README.md b/README.md
index 4629479..c1dccdb 100644
--- a/README.md
+++ b/README.md
@@ -4,4 +4,16 @@ EarthCam.com is the premiere network of scenic webcams and offers a complete dat
 v1.0.7 was broken, so I forked this v1.0.7.1 to fix it.
 
-forked by idleloop from [official-xbmc-hub-repo](http://www.xbmchub.com/forums/official-xbmc-hub-repo/)
\ No newline at end of file
+### Fork
+
+forked from [official-xbmc-hub-repo](http://www.xbmchub.com/forums/official-xbmc-hub-repo/)
+
+### License
+
+Distributed [under GPL 3](http://www.gnu.org/licenses/gpl-3.0.html).
+
+Original jzbd project distributed under GPL 2.1.
+
+### Contact
+
+idleloop -at- yahoo.com
\ No newline at end of file
diff --git a/addon.xml b/addon.xml
index 03b4b36..168d309 100644
--- a/addon.xml
+++ b/addon.xml
@@ -1,8 +1,8 @@
+       version="1.0.7.1"
+       provider-name="xbmchub.com, (forked by idleloop)">
diff --git a/changelog.txt b/changelog.txt
index 60f42de..6de42bf 100644
--- a/changelog.txt
+++ b/changelog.txt
@@ -1,3 +1,8 @@
+V1.0.7.1
+(2014, Oct)
+Code forked by idleloop
+to fix no video at all
+
 V1.0.7
 Code updated by Blazetamer
 to fix no items in USA & Worldwide
\ No newline at end of file
diff --git a/channel.py b/channel.py
index bef00b9..fd0bc5a 100644
--- a/channel.py
+++ b/channel.py
@@ -7,6 +7,7 @@
 #------------------------------------------------------------
 #Code Upated by: Blazetamer 2014
+#Code Upated by: idleloop @ 2014, Oct
 
 import urlparse,urllib2,urllib,re
 import os, sys
@@ -36,20 +37,20 @@ def worldwide(item):
     itemlist = []
 
     data = scrapertools.cache_page(item.url)
-    patron = ';" href="(.+?)" class="locationLink">(.+?)'
+    patron = ';" href="([^"]+)" class="locationLink">(.+?)'
     #patron = '([^<]+)'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    if DEBUG: scrapertools.printMatches(matches)
+    if (DEBUG==True): scrapertools.printMatches(matches)
 
     for scrapedurl,scrapedtitle in matches:
         url = urlparse.urljoin(item.url,scrapedurl)
         url = url.replace('[','').replace(']','')
         print'WORLDWIDE PARSED IS ' +url
         title = scrapedtitle.strip()
-        if (DEBUG): logger.info("title=["+title+"], url=["+url+"]")
+        if (DEBUG==True): logger.info("title=["+title+"], url=["+url+"]")
         if 'page=world' in url:
             itemlist.append( Item(action="cams", title=title , url=url, fanart=os.path.join(IMAGES,"fanart.jpg") ) )
-
+
     return itemlist
 
 def usa(item):
@@ -57,27 +58,28 @@
     logger.info("[channel.py] usa")
     itemlist = []
 
     data = scrapertools.cache_page(item.url)
-    patron = ';" href="(.+?)" class="locationLink">(.+?)'
+    patron = ';" href="([^"]+)" class="locationLink">(.+?)'
     #patron = '([^<]+)'
     matches = re.compile(patron,re.DOTALL).findall(data)
     #match=re.compile(';" href="(.+?)" class="locationLink">(.+?)').findall(link)
-    if DEBUG: scrapertools.printMatches(matches)
+    if (DEBUG==True): scrapertools.printMatches(matches)
 
     for scrapedurl,scrapedtitle in matches:
         url = urlparse.urljoin(item.url,scrapedurl)
         print'USA PARSED IS ' +url
         title = scrapedtitle.strip()
-        if (DEBUG): logger.info("title=["+title+"], url=["+url+"]")
+        if (DEBUG==True): logger.info("title=["+title+"], url=["+url+"]")
         if 'country=us'in url:
             itemlist.append( Item(action="cams", title=title , url=url, fanart=os.path.join(IMAGES,"fanart.jpg") ) )
-
+
     return itemlist
 
 def cams(item):
     logger.info("[channel.py] cams")
     itemlist = []
 
-    data = scrapertools.cache_page(item.url)
+    item.url.replace(" ","%20")
+    data = scrapertools.cache_page(item.url.replace(" ","%20"))
     #logger.info("data="+data)
 
     patron = '
+1): # just checking how menu submenus are here... if >1, info is already enough
+            return itemlist
+        logger.info("cam_id="+str(cam_id))
+        liveon = cam_data[cam_id]["liveon"]
+        logger.info("liveon="+liveon)
+        if liveon!="disabled":
+            ###video_url = cam_data[cam_id]["worldtour_path"] + video_url
+            try:
+                if "worldtour_path" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4|\.jpg|\.png)$', cam_data[cam_id]["worldtour_path"] ):
+                    video_url = cam_data[cam_id]["worldtour_path"]
+                elif "livestreamingpath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["livestreamingpath"] ):
+                    video_url = cam_data[cam_id]["streamingdomain"] + cam_data[cam_id]["livestreamingpath"]
+                elif "timelapsepath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["timelapsepath"] ):
+                    video_url = cam_data[cam_id]["timelapsedomain"] + cam_data[cam_id]["timelapsepath"]
+                elif "archivepath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["archivepath"] ):
+                    video_url = cam_data[cam_id]["archivedomain"] + cam_data[cam_id]["archivepath"]
+                else:
+                    continue
+                video_url.replace("//","/")
+                url = calculate_url(video_url)
+                item=Item(action="play", url=url,
+                    folder=False)
+                try:
+                    item.title=cam_data[cam_id]["title"]
+                except Exception, e:
+                    item.title=str(cam_id)
+                try:
+                    item.fanart='http://static.earthcamcdn.com'+cam_data[cam_id]["offlineimage"]
+                except Exception, e:
+                    logger.info("[channel.py] [play] ERROR: no fanart")
+                try:
+                    item.thumbnail=cam_data[cam_id]["thumbimage"]
+                except Exception, e:
+                    logger.info("[channel.py] [play] ERROR: no thumbnail")
+                try:
+                    item.plot = re.sub(r']*>', '',
+                        cam_data[cam_id]["description"].replace('+', ' '),
+                        flags=re.IGNORECASE )
+                    item.plot = re.sub(r'<[^>]+>', "\n", item.plot)
+                except Exception, e:
+                    logger.info("[channel.py] [play] ERROR: no plot")
+                itemlist.append( item )
+            except Exception, e:
+                logger.info("[channel.py] [play] ERROR:"+url)
+    return itemlist
+
+def play(item):
+    itemlist = []
+    if re.search( r'(\.flv|\.mp4|\.jpg|\.png)$', item.url ):
+        itemlist.append( item )
+    else: # for backward compatitbility with v1.0.7 favorites
+        itemlist=previous_play( item )
+    return itemlist
+
+def calculate_url(video_url):
     #video_url2 = scrapertools.get_match(json_decoded,'"worldtour_path"\:"([^"]+)"')
     #logger.info("video_url2="+video_url2)
-    #video_url = "rtmp://video2.earthcam.com/fecnetwork/hdtimes11.flv"
     #./rtmpdump-2.4 -r "rtmp://video2.earthcam.com/fecnetwork/4828.flv" --swfVfy "http://www.earthcam.com/swf/cam_player_v2/ecnPlayer.swf?20121010" --pageUrl "http://www.earthcam.com/world/turkey/istanbul/" --tcUrl "rtmp://video2.earthcam.com/fecnetwork" --app fecnetwork --live --playpath "4828.flv" -o out.flv
"rtmp://video2.earthcam.com/fecnetwork" --app fecnetwork --live --playpath "4828.flv" -o out.flv - # Taken from http://forum.xbmc.org/archive/index.php/thread-120418-20.html # rtmp://video2.earthcam.com/ app=fecnetwork swfUrl=http://www.earthcam.com/swf/cam_player_v2/ecnPlayer.swf playpath=fridaysHD1.flv live=true timeout=180 - if video_url.lower().endswith(".jpg") or video_url.lower().endswith(".png"): url = video_url + elif video_url.lower().startswith("http://") or video_url.lower().endswith(".mp4"): + url = video_url else: rtmp_url = scrapertools.get_match(video_url,"(rtmp\://[^\/]+/)") app = scrapertools.get_match(video_url,"rtmp\://[^\/]+/([a-z]+)/") - playpath = scrapertools.get_match(video_url,"rtmp\://[^\/]+/[a-z]+/([a-zA-Z0-9]+\.flv)") + ###playpath = scrapertools.get_match(video_url,"rtmp\://[^\/]+/[a-z]+/([a-zA-Z0-9]+\.flv)") + playpath = scrapertools.get_match(video_url,"rtmp\://[^\/]+/[a-z]+/(.+\.flv)") swfurl = "http://www.earthcam.com/swf/cam_player_v2/ecnPlayer.swf" - pageurl = item.url - url=rtmp_url + " app=" + app + " swfUrl=" + swfurl + " playpath=" + playpath + " live=true timeout=180" - logger.info("url="+url) + logger.info("url="+url) + return url - itemlist.append( Item(action="play", title=item.title , server="directo", url=url, fanart=item.thumbnail, thumbnail=item.thumbnail, folder=False) ) - - return itemlist def load_json(data): # callback to transform json string values to utf8 diff --git a/core/scrapertools.py b/core/scrapertools.py index 0377871..2d109a5 100644 --- a/core/scrapertools.py +++ b/core/scrapertools.py @@ -25,7 +25,7 @@ CACHE_PATH = config.get_setting("cache.dir") logger.info("[scrapertools.py] CACHE_PATH="+CACHE_PATH) -DEBUG = False +DEBUG = config.get_setting("debug") def cache_page(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modo_cache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()): return cachePage(url,post,headers,modo_cache,timeout=timeout) @@ -33,16 +33,16 @@ def cache_page(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; # TODO: (3.1) Quitar el parámetro modoCache (ahora se hace por configuración) # TODO: (3.2) Usar notación minusculas_con_underscores para funciones y variables como recomienda Python http://www.python.org/dev/peps/pep-0008/ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modoCache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()): - logger.info("[scrapertools.py] cachePage url="+url) + if (DEBUG==True): logger.info("[scrapertools.py] cachePage url="+url) modoCache = config.get_setting("cache.mode") ''' if config.get_platform()=="plex": from PMS import HTTP try: - logger.info("url="+url) + if (DEBUG==True): logger.info("url="+url) data = HTTP.Request(url) - logger.info("descargada") + if (DEBUG==True): logger.info("descargada") except: data = "" logger.error("Error descargando "+url) @@ -55,7 +55,7 @@ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; # CACHE_NUNCA: Siempre va a la URL a descargar # obligatorio para peticiones POST if modoCache == CACHE_NUNCA or post is not None: - logger.info("[scrapertools.py] MODO_CACHE=2 (no cachear)") + if (DEBUG==True): logger.info("[scrapertools.py] MODO_CACHE=2 (no cachear)") try: data = downloadpage(url,post,headers, timeout=timeout) @@ -64,7 +64,7 @@ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; # 
CACHE_SIEMPRE: Siempre descarga de cache, sin comprobar fechas, excepto cuando no está elif modoCache == CACHE_SIEMPRE: - logger.info("[scrapertools.py] MODO_CACHE=1 (cachear todo)") + if (DEBUG==True): logger.info("[scrapertools.py] MODO_CACHE=1 (cachear todo)") # Obtiene los handlers del fichero en la cache cachedFile, newFile = getCacheFileNames(url) @@ -81,16 +81,16 @@ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; outfile.write(data) outfile.flush() outfile.close() - logger.info("[scrapertools.py] Grabado a " + newFile) + if (DEBUG==True): logger.info("[scrapertools.py] Grabado a " + newFile) else: - logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) + if (DEBUG==True): logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) infile = open( cachedFile ) data = infile.read() infile.close() # CACHE_ACTIVA: Descarga de la cache si no ha cambiado else: - logger.info("[scrapertools.py] MODO_CACHE=0 (automática)") + if (DEBUG==True): logger.info("[scrapertools.py] MODO_CACHE=0 (automática)") # Datos descargados data = "" @@ -110,15 +110,15 @@ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; outfile.write(data) outfile.flush() outfile.close() - logger.info("[scrapertools.py] Grabado a " + newFile) + if (DEBUG==True): logger.info("[scrapertools.py] Grabado a " + newFile) # Si sólo hay uno comprueba el timestamp (hace una petición if-modified-since) else: # Extrae el timestamp antiguo del nombre del fichero oldtimestamp = time.mktime( time.strptime(cachedFile[-20:-6], "%Y%m%d%H%M%S") ) - logger.info("[scrapertools.py] oldtimestamp="+cachedFile[-20:-6]) - logger.info("[scrapertools.py] oldtimestamp="+time.ctime(oldtimestamp)) + if (DEBUG==True): logger.info("[scrapertools.py] oldtimestamp="+cachedFile[-20:-6]) + if (DEBUG==True): logger.info("[scrapertools.py] oldtimestamp="+time.ctime(oldtimestamp)) # Hace la petición updated,data = downloadtools.downloadIfNotModifiedSince(url,oldtimestamp) @@ -134,10 +134,10 @@ def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; outfile.write(data) outfile.flush() outfile.close() - logger.info("[scrapertools.py] Grabado a " + newFile) + if (DEBUG==True): logger.info("[scrapertools.py] Grabado a " + newFile) # Devuelve el contenido del fichero de la cache else: - logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) + if (DEBUG==True): logger.info("[scrapertools.py] Leyendo de cache " + cachedFile) infile = open( cachedFile ) data = infile.read() infile.close() @@ -227,11 +227,11 @@ def getSiteCachePath(url): def cachePage2(url,headers): - logger.info("Descargando " + url) + if (DEBUG==True): logger.info("Descargando " + url) inicio = time.clock() req = urllib2.Request(url) for header in headers: - logger.info(header[0]+":"+header[1]) + if (DEBUG==True): logger.info(header[0]+":"+header[1]) req.add_header(header[0], header[1]) try: @@ -239,26 +239,26 @@ def cachePage2(url,headers): except: req = urllib2.Request(url.replace(" ","%20")) for header in headers: - logger.info(header[0]+":"+header[1]) + if (DEBUG==True): logger.info(header[0]+":"+header[1]) req.add_header(header[0], header[1]) response = urllib2.urlopen(req) data=response.read() response.close() fin = time.clock() - logger.info("Descargado en %d segundos " % (fin-inicio+1)) + if (DEBUG==True): logger.info("Descargado en %d segundos " % (fin-inicio+1)) ''' outfile = open(localFileName,"w") outfile.write(data) outfile.flush() outfile.close() - logger.info("Grabado a " + 
+    if (DEBUG==True): logger.info("Grabado a " + localFileName)
     '''
 
     return data
 
 def cachePagePost(url,post):
-    logger.info("Descargando " + url)
+    if (DEBUG==True): logger.info("Descargando " + url)
     inicio = time.clock()
     req = urllib2.Request(url,post)
     req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
@@ -272,14 +272,14 @@ def cachePagePost(url,post):
     data=response.read()
     response.close()
     fin = time.clock()
-    logger.info("Descargado en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("Descargado en %d segundos " % (fin-inicio+1))
 
     '''
     outfile = open(localFileName,"w")
     outfile.write(data)
     outfile.flush()
     outfile.close()
-    logger.info("Grabado a " + localFileName)
+    if (DEBUG==True): logger.info("Grabado a " + localFileName)
     '''
 
     return data
@@ -295,13 +295,13 @@ def http_error_302(self, req, fp, code, msg, headers):
     http_error_307 = http_error_302
 
 def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],follow_redirects=True, timeout=socket.getdefaulttimeout()):
-    logger.info("[scrapertools.py] downloadpage")
-    logger.info("[scrapertools.py] url="+url)
+    if (DEBUG==True): logger.info("[scrapertools.py] downloadpage")
+    if (DEBUG==True): logger.info("[scrapertools.py] url="+url)
 
     if post is not None:
-        logger.info("[scrapertools.py] post="+post)
+        if (DEBUG==True): logger.info("[scrapertools.py] post="+post)
     else:
-        logger.info("[scrapertools.py] post=None")
+        if (DEBUG==True): logger.info("[scrapertools.py] post=None")
 
     # ---------------------------------
     # Instala las cookies
@@ -309,7 +309,7 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
 
     # Inicializa la librería de las cookies
     ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
-    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
+    if (DEBUG==True): logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
 
     cj = None
     ClientCookie = None
@@ -317,29 +317,29 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
 
     # Let's see if cookielib is available
     try:
-        logger.info("[scrapertools.py] Importando cookielib")
+        if (DEBUG==True): logger.info("[scrapertools.py] Importando cookielib")
         import cookielib
     except ImportError:
-        logger.info("[scrapertools.py] cookielib no disponible")
+        if (DEBUG==True): logger.info("[scrapertools.py] cookielib no disponible")
         # If importing cookielib fails
         # let's try ClientCookie
         try:
-            logger.info("[scrapertools.py] Importando ClientCookie")
+            if (DEBUG==True): logger.info("[scrapertools.py] Importando ClientCookie")
             import ClientCookie
         except ImportError:
-            logger.info("[scrapertools.py] ClientCookie no disponible")
+            if (DEBUG==True): logger.info("[scrapertools.py] ClientCookie no disponible")
             # ClientCookie isn't available either
             urlopen = urllib2.urlopen
             Request = urllib2.Request
         else:
-            logger.info("[scrapertools.py] ClientCookie disponible")
+            if (DEBUG==True): logger.info("[scrapertools.py] ClientCookie disponible")
             # imported ClientCookie
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.MozillaCookieJar()
     else:
-        logger.info("[scrapertools.py] cookielib disponible")
+        if (DEBUG==True): logger.info("[scrapertools.py] cookielib disponible")
         # importing cookielib worked
         urlopen = urllib2.urlopen
         Request = urllib2.Request
@@ -350,23 +350,23 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
     if cj is not None:
         # we successfully imported
         # one of the two cookie handling modules
-        logger.info("[scrapertools.py] Hay cookies")
+        if (DEBUG==True): logger.info("[scrapertools.py] Hay cookies")
 
         if os.path.isfile(ficherocookies):
-            logger.info("[scrapertools.py] Leyendo fichero cookies")
+            if (DEBUG==True): logger.info("[scrapertools.py] Leyendo fichero cookies")
             # if we have a cookie file already saved
             # then load the cookies into the Cookie Jar
             try:
                 cj.load(ficherocookies)
             except:
-                logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
+                if (DEBUG==True): logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
                 os.remove(ficherocookies)
 
         # Now we need to get our Cookie Jar
         # installed in the opener;
         # for fetching URLs
         if cookielib is not None:
-            logger.info("[scrapertools.py] opener usando urllib2 (cookielib)")
+            if (DEBUG==True): logger.info("[scrapertools.py] opener usando urllib2 (cookielib)")
             # if we use cookielib
             # then we get the HTTPCookieProcessor
             # and install the opener in urllib2
@@ -377,7 +377,7 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
             urllib2.install_opener(opener)
 
         else:
-            logger.info("[scrapertools.py] opener usando ClientCookie")
+            if (DEBUG==True): logger.info("[scrapertools.py] opener usando ClientCookie")
             # if we use ClientCookie
             # then we get the HTTPCookieProcessor
             # and install the opener in ClientCookie
@@ -396,16 +396,16 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
 
     # Construye el request
     if post is None:
-        logger.info("[scrapertools.py] petición GET")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición GET")
     else:
-        logger.info("[scrapertools.py] petición POST")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición POST")
 
     # Añade las cabeceras
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     for header in headers:
-        logger.info("[scrapertools.py] header %s=%s" % (str(header[0]),str(header[1])) )
+        if (DEBUG==True): logger.info("[scrapertools.py] header %s=%s" % (str(header[0]),str(header[1])) )
         txheaders[header[0]]=header[1]
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     req = Request(url, post, txheaders)
     if timeout is None:
@@ -430,12 +430,12 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
     # Lee los datos y cierra
     data=handle.read()
     info = handle.info()
-    logger.info("[scrapertools.py] Respuesta")
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] Respuesta")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     for header in info:
-        logger.info("[scrapertools.py] "+header+"="+info[header])
+        if (DEBUG==True): logger.info("[scrapertools.py] "+header+"="+info[header])
     handle.close()
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     '''
     # Lanza la petición
@@ -454,7 +454,7 @@ def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh;
 
     # Tiempo transcurrido
     fin = time.clock()
-    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
 
     return data
 
@@ -465,7 +465,7 @@ def downloadpagewithcookies(url):
 
     # Inicializa la librería de las cookies
     ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' )
-    logger.info("[scrapertools.py] Cookiefile="+ficherocookies)
+    if (DEBUG==True): logger.info("[scrapertools.py] Cookiefile="+ficherocookies)
 
     cj = None
     ClientCookie = None
@@ -507,7 +507,7 @@ def downloadpagewithcookies(url):
         try:
             cj.load(ficherocookies)
         except:
-            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
+            if (DEBUG==True): logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
             os.remove(ficherocookies)
 
         # Now we need to get our Cookie Jar
@@ -555,7 +555,7 @@ def downloadpagewithcookies(url):
     return data
 
 def downloadpageWithoutCookies(url):
-    logger.info("[scrapertools.py] Descargando " + url)
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargando " + url)
     inicio = time.clock()
     req = urllib2.Request(url)
     req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
@@ -570,7 +570,7 @@ def downloadpageWithoutCookies(url):
     data=response.read()
     response.close()
     fin = time.clock()
-    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
 
     return data
 
@@ -578,7 +578,7 @@ def downloadpageGzip(url):
 
     # Inicializa la librería de las cookies
     ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' )
-    logger.info("Cookiefile="+ficherocookies)
+    if (DEBUG==True): logger.info("Cookiefile="+ficherocookies)
     inicio = time.clock()
 
     cj = None
@@ -625,7 +625,7 @@ def downloadpageGzip(url):
         try:
             cj.load(ficherocookies)
         except:
-            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
+            if (DEBUG==True): logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
             os.remove(ficherocookies)
 
         # Now we need to get our Cookie Jar
@@ -655,7 +655,7 @@ def downloadpageGzip(url):
     import httplib
 
     parsedurl = urlparse.urlparse(url)
-    logger.info("parsedurl="+str(parsedurl))
+    if (DEBUG==True): logger.info("parsedurl="+str(parsedurl))
 
     txheaders = {
         'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
@@ -666,7 +666,7 @@ def downloadpageGzip(url):
         'Keep-Alive':'300',
         'Connection':'keep-alive',
         'Referer':parsedurl[0]+"://"+parsedurl[1]}
-    logger.info(str(txheaders))
+    if (DEBUG==True): logger.info(str(txheaders))
 
     # fake a user agent, some websites (like google) don't like automated exploration
 
@@ -678,7 +678,7 @@ def downloadpageGzip(url):
     handle.close()
     fin = time.clock()
-    logger.info("[scrapertools.py] Descargado 'Gzipped data' en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargado 'Gzipped data' en %d segundos " % (fin-inicio+1))
 
     # Descomprime el archivo de datos Gzip
     try:
@@ -690,7 +690,7 @@ def downloadpageGzip(url):
         data1 = gzipper.read()
         gzipper.close()
         fin = time.clock()
-        logger.info("[scrapertools.py] 'Gzipped data' descomprimido en %d segundos " % (fin-inicio+1))
+        if (DEBUG==True): logger.info("[scrapertools.py] 'Gzipped data' descomprimido en %d segundos " % (fin-inicio+1))
         return data1
     except:
         return data
@@ -698,7 +698,7 @@ def downloadpageGzip(url):
 def printMatches(matches):
     i = 0
     for match in matches:
-        logger.info("[scrapertools.py] %d %s" % (i , match))
+        if (DEBUG==True): logger.info("[scrapertools.py] %d %s" % (i , match))
         i = i + 1
 
 def get_match(data,patron,index=0):
@@ -726,7 +726,7 @@ def fixup(m):
             return unichr(int(text[2:-1])).encode("utf-8")
         except ValueError:
-            logger.info("error de valor")
+            if (DEBUG==True): logger.info("error de valor")
             pass
     else:
         # named entity
@@ -745,7 +745,7 @@ def fixup(m):
             import htmlentitydefs
             text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
         except KeyError:
-            logger.info("keyerror")
+            if (DEBUG==True): logger.info("keyerror")
             pass
         except:
             pass
@@ -976,16 +976,16 @@ def getLocationHeaderFromResponse(url):
 
 def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]):
     header_to_get = header_to_get.lower()
-    logger.info("[scrapertools.py] get_header_from_response url="+url+", header_to_get="+header_to_get)
+    if (DEBUG==True): logger.info("[scrapertools.py] get_header_from_response url="+url+", header_to_get="+header_to_get)
 
     if post is not None:
-        logger.info("[scrapertools.py] post="+post)
+        if (DEBUG==True): logger.info("[scrapertools.py] post="+post)
     else:
-        logger.info("[scrapertools.py] post=None")
+        if (DEBUG==True): logger.info("[scrapertools.py] post=None")
 
     # Inicializa la librería de las cookies
     ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
-    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
+    if (DEBUG==True): logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
 
     cj = None
     ClientCookie = None
@@ -1000,13 +1000,13 @@ def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agen
     # that has useful load and save methods
     if os.path.isfile(ficherocookies):
-        logger.info("[scrapertools.py] Leyendo fichero cookies")
+        if (DEBUG==True): logger.info("[scrapertools.py] Leyendo fichero cookies")
         # if we have a cookie file already saved
         # then load the cookies into the Cookie Jar
         try:
             cj.load(ficherocookies)
         except:
-            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
+            if (DEBUG==True): logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
             os.remove(ficherocookies)
 
     if header_to_get=="location":
@@ -1023,9 +1023,9 @@ def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agen
 
     # Traza la peticion
     if post is None:
-        logger.info("[scrapertools.py] petición GET")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición GET")
     else:
-        logger.info("[scrapertools.py] petición POST")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición POST")
 
     # Login y password Filenium
     # http://abcd%40gmail.com:mipass@filenium.com/get/Oi8vd3d3/LmZpbGVz/ZXJ2ZS5j/b20vZmls/ZS9kTnBL/dm11/b0/?.zip
@@ -1035,11 +1035,11 @@ def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agen
         headers.append( [ "Authorization",authorization_header ] )
 
     # Array de cabeceras
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     for header in headers:
-        logger.info("[scrapertools.py] header=%s" % str(header[0]))
+        if (DEBUG==True): logger.info("[scrapertools.py] header=%s" % str(header[0]))
         txheaders[header[0]]=header[1]
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     # Construye el request
     req = Request(url, post, txheaders)
@@ -1051,34 +1051,34 @@ def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agen
     # Lee los datos y cierra
     #data=handle.read()
     info = handle.info()
-    logger.info("[scrapertools.py] Respuesta")
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] Respuesta")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     location_header=""
     for header in info:
-        logger.info("[scrapertools.py] "+header+"="+info[header])
+        if (DEBUG==True): logger.info("[scrapertools.py] "+header+"="+info[header])
         if header==header_to_get:
             location_header=info[header]
     handle.close()
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     # Tiempo transcurrido
     fin = time.clock()
-    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
 
     return location_header
 
 def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]):
     return_headers = []
-    logger.info("[scrapertools.py] get_headers_from_response url="+url)
+    if (DEBUG==True): logger.info("[scrapertools.py] get_headers_from_response url="+url)
 
     if post is not None:
-        logger.info("[scrapertools.py] post="+post)
+        if (DEBUG==True): logger.info("[scrapertools.py] post="+post)
     else:
-        logger.info("[scrapertools.py] post=None")
+        if (DEBUG==True): logger.info("[scrapertools.py] post=None")
 
     # Inicializa la librería de las cookies
     ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
-    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
+    if (DEBUG==True): logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
 
     cj = None
     ClientCookie = None
@@ -1093,13 +1093,13 @@ def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0
     # that has useful load and save methods
     if os.path.isfile(ficherocookies):
-        logger.info("[scrapertools.py] Leyendo fichero cookies")
+        if (DEBUG==True): logger.info("[scrapertools.py] Leyendo fichero cookies")
         # if we have a cookie file already saved
         # then load the cookies into the Cookie Jar
         try:
             cj.load(ficherocookies)
         except:
-            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
+            if (DEBUG==True): logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
             os.remove(ficherocookies)
 
     opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
@@ -1113,16 +1113,16 @@ def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0
 
     # Traza la peticion
     if post is None:
-        logger.info("[scrapertools.py] petición GET")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición GET")
     else:
-        logger.info("[scrapertools.py] petición POST")
+        if (DEBUG==True): logger.info("[scrapertools.py] petición POST")
 
     # Array de cabeceras
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     for header in headers:
-        logger.info("[scrapertools.py] header=%s" % str(header[0]))
+        if (DEBUG==True): logger.info("[scrapertools.py] header=%s" % str(header[0]))
         txheaders[header[0]]=header[1]
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     # Construye el request
     req = Request(url, post, txheaders)
@@ -1134,18 +1134,18 @@ def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0
 
     # Lee los datos y cierra
     #data=handle.read()
     info = handle.info()
-    logger.info("[scrapertools.py] Respuesta")
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] Respuesta")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
     location_header=""
     for header in info:
-        logger.info("[scrapertools.py] "+header+"="+info[header])
+        if (DEBUG==True): logger.info("[scrapertools.py] "+header+"="+info[header])
         return_headers.append( [header,info[header]] )
     handle.close()
-    logger.info("[scrapertools.py] ---------------------------")
+    if (DEBUG==True): logger.info("[scrapertools.py] ---------------------------")
 
     # Tiempo transcurrido
     fin = time.clock()
-    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
+    if (DEBUG==True): logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
 
     return return_headers
 
@@ -1181,14 +1181,14 @@ def get_filename_from_url(url):
 
 # Parses the title of a tv show episode and returns the season id + episode id in format "1x01"
 def get_season_and_episode(title):
-    logger.info("get_season_and_episode('"+title+"')")
+    if (DEBUG==True): logger.info("get_season_and_episode('"+title+"')")
 
     patron ="(\d+)[x|X](\d+)"
     matches = re.compile(patron).findall(title)
-    logger.info(str(matches))
+    if (DEBUG==True): logger.info(str(matches))
 
     filename=matches[0][0]+"x"+matches[0][1]
-    logger.info("get_season_and_episode('"+title+"') -> "+filename)
+    if (DEBUG==True): logger.info("get_season_and_episode('"+title+"') -> "+filename)
 
     return filename
diff --git a/xbmctools.py b/xbmctools.py
index 1cc87d4..7fb6c5d 100644
--- a/xbmctools.py
+++ b/xbmctools.py
@@ -777,14 +777,14 @@ def renderItems(itemlist, params, url, category,isPlayable='false'):
 
         if item.fanart=="":
-            channel_fanart = os.path.join( config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel+'.jpg')
+            ###channel_fanart = os.path.join( config.get_runtime_path(), 'resources', 'images', 'fanart', item.channel+'.jpg')
 
-            if os.path.exists(channel_fanart):
-                item.fanart = channel_fanart
-            else:
+            ###if os.path.exists(channel_fanart):
+            ###    item.fanart = channel_fanart
+            ###else:
                 item.fanart = os.path.join(config.get_runtime_path(),"fanart.jpg")
 
-        if item.folder :
+        if item.folder:
             if len(item.extra)>0:
                 addnewfolderextra( item.channel , item.action , item.category , item.title , item.url , item.thumbnail , item.plot , extradata = item.extra , totalItems = len(itemlist), fanart=item.fanart , context=item.context, show=item.show, fulltitle=item.fulltitle )
             else: