From cd2abbd3c8c96326316fe29127ea1f002cef3a84 Mon Sep 17 00:00:00 2001 From: rols1 Date: Sun, 16 Apr 2023 16:39:26 +0200 Subject: [PATCH] =?UTF-8?q?=C3=84nderungen=20/=20Korrekturen=20siehe=20cha?= =?UTF-8?q?ngelog.txt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- addon.xml | 2 +- ardundzdf.py | 42 ++++-- changelog.txt | 14 ++ resources/lib/ARDnew.py | 275 +++++----------------------------- resources/lib/TagesschauXL.py | 7 +- resources/lib/arte.py | 2 - resources/lib/my3Sat.py | 5 +- resources/lib/util.py | 85 +---------- 8 files changed, 102 insertions(+), 330 deletions(-) diff --git a/addon.xml b/addon.xml index 76b5743..44fb561 100644 --- a/addon.xml +++ b/addon.xml @@ -1,5 +1,5 @@ - + diff --git a/ardundzdf.py b/ardundzdf.py index dcd2f86..dd12bda 100644 --- a/ardundzdf.py +++ b/ardundzdf.py @@ -56,8 +56,8 @@ # VERSION -> addon.xml aktualisieren # 94 # Numerierung für Einzelupdate -VERSION = '4.6.8' -VDATE = '10.04.2023' +VERSION = '4.6.9' +VDATE = '16.04.2023' # (c) 2019 by Roland Scholz, rols1@gmx.de @@ -2805,7 +2805,7 @@ def ARDSportWDR(): addDir(li=li, label=title, action="dirList", dirID="ARDSportLive", fanart=img, thumb=img, fparams=fparams, tagline=tag) - + ''' title = u"Event: [B]NORDISCHE SKI-WM[/B]" # Großevent tag = u"Alles zur Nordischen Ski-WM in Planica." cacheID = "Sport_SkiWM" @@ -2816,7 +2816,8 @@ def ARDSportWDR(): (quote(title), quote(path), quote(img), cacheID) addDir(li=li, label=title, action="dirList", dirID="ARDSportCluster", fanart=img, thumb=img, fparams=fparams, tagline=tag) - + ''' + title = u"Event-Archiv" # Buttons für ältere Events tag = u"Archiv für zurückliegende Groß-Events." img = logo @@ -2864,6 +2865,17 @@ def ARDSportWDRArchiv(): li = xbmcgui.ListItem() li = home(li, ID='ARD') # Home-Button + title = u"Event: [B]NORDISCHE SKI-WM[/B]" # Großevent + tag = u"Alles zur Nordischen Ski-WM in Planica." + cacheID = "Sport_SkiWM" + img = "https://images.sportschau.de/image/237354e3-b9b2-46bf-993a-8ecc48947e7f/AAABhol6U80/AAABg8tMRzY/20x9-1280/constantin-schmid-150.webp" + path = "https://www.sportschau.de/wintersport/nordische-ski-wm" + title=py2_encode(title); path=py2_encode(path); img=py2_encode(img); + fparams="&fparams={'title': '%s', 'path': '%s', 'img': '%s', 'cacheID': '%s'}" %\ + (quote(title), quote(path), quote(img), cacheID) + addDir(li=li, label=title, action="dirList", dirID="ARDSportCluster", fanart=img, thumb=img, + fparams=fparams, tagline=tag) + title = u"Event: [B]Handball-WM 2023 in Polen und Schweden[/B]" # Großevent tag = u"Nachrichten, Berichte, Interviews und Ergebnisse zur Handball-WM 2023 in Polen und Schweden mit dem DHB-Team." 
cacheID = "Sport_WMHandball" @@ -3502,6 +3514,10 @@ def ARDSportMediaPlayer(li, item): if duration == "0 sec": duration = "unbekannt" + avail = stringextract('av_original_air_time":"', '"', cont) + if avail: + verf = time_translate(avail) + chapter = stringextract('chapter1":"', '"', cont) creator = stringextract('creator":"', '"', cont) genre = stringextract('_genre":"', '"', cont) @@ -3521,6 +3537,8 @@ def ARDSportMediaPlayer(li, item): PLog("duration: " + duration) if live == False and duration: tag = "%s | Dauer %s" % (tag, duration) + if verf: + tag = u"%s | Verfügbar ab [B]%s[/B]" % (tag, verf) tag = "%s\n%s | %s | %s | %s" % (tag, chapter, creator, genre, geo) @@ -3529,7 +3547,7 @@ def ARDSportMediaPlayer(li, item): Plot = tag.replace("\n", "||") PLog("Satz31:") - PLog(player); PLog(live); PLog(title); PLog(mp3_url); PLog(stream_url); + PLog(player); PLog(live); PLog(title); PLog(mp3_url); PLog(stream_url); PLog(avail); return player, live, title, mp3_url, stream_url, img, tag, summ, Plot @@ -8371,7 +8389,7 @@ def ZDFRubrikSingle(title, path, clus_title='', page='', ID='', custom_cluster=' else: # Einzelbeitrag direkt - anders als A-Z (ZDF_get_content) if SETTINGS.getSetting('pref_load_summary') == 'true': # Inhaltstext im Voraus laden? - # get_summary_pre in ZDF_get_teaserDetails bereits erledigt + # get_summary_pre in ZDF_get_teaserDetails bereits erledigt? if teaserDetails == '': skip_verf=False; skip_pubDate=False # beide Daten ermitteln summ_txt = get_summary_pre(path, 'ZDF', skip_verf, skip_pubDate) @@ -8663,7 +8681,7 @@ def ZDF_get_teaserDetails(page, NodePath='', sophId=''): skip_verf=False; skip_pubDate=False # Teaser enth. beide Daten summ_txt = get_summary_pre(path, 'ZDF', skip_verf, skip_pubDate) PLog(len(summ_txt)); - if summ_txt and len(summ_txt) > len(descr): + if len(summ_txt) > len(descr): descr= summ_txt PLog('title: %s, path: %s, img_src: %s, descr: %s, dauer: %s, enddate: %s, isvideo: %s' %\ @@ -8997,7 +9015,7 @@ def International(title): # ZDF_Search, ZDFSportLive, Tivi_Search (Modul childs). # Blockbereich für VERPASST erweitert (umfasst data-station) # 08.01.2021 Anpassung an geänderte Formate bei Hochkant-Videos. -# mark: farbige Markierung in title (wie ARDSearchnew -> get_page_content) +# mark: farbige Markierung in title (wie ARDSearchnew -> get_json_content) # def ZDF_get_content(li, page, ref_path, ID=None, sfilter='Alle ZDF-Sender', skip_list='', mark=''): PLog('ZDF_get_content:'); PLog(ref_path); PLog(ID); PLog(sfilter); @@ -9392,7 +9410,7 @@ def ZDF_get_content(li, page, ref_path, ID=None, sfilter='Alle ZDF-Sender', skip skip_verf = True summ_txt = get_summary_pre(plusbar_path, 'ZDF', skip_verf, skip_pubDate) PLog(len(summary));PLog(len(summ_txt)); - if summ_txt and len(summ_txt) > len(summary): + if len(summ_txt) > len(summary): tag_par= "%s\n\n%s" % (tagline, summ_txt) tag_par = tag_par.replace('\n', '||') summary = summ_txt @@ -9425,7 +9443,7 @@ def ZDF_get_content(li, page, ref_path, ID=None, sfilter='Alle ZDF-Sender', skip # fname: Dateinamen der Liste (full_shows_ZDF, full_shows_ARD) # Rückgabe: fett-markierter Titel bei entspr. Beitrag, sonst unbeh. 
# Titel -# Aufrufer: ZDF_get_content, get_page_content (ARDnew) +# Aufrufer: ZDF_get_content, get_json_content (ARDnew) # def full_shows(title, title_samml, summary, duration, fname): PLog('full_shows:') @@ -9463,6 +9481,7 @@ def full_shows(title, title_samml, summary, duration, fname): PLog("duration: " + duration) title = title.strip() + ret = "nofill" # Return-Flag for show in shows: st, sdur = show.split("|") # Bsp. Querbeet|40 #PLog(duration); PLog(st); PLog(sdur); # PLog(up_low(st) in up_low(title)); @@ -9474,8 +9493,9 @@ def full_shows(title, title_samml, summary, duration, fname): PLog("sdur: " + sdur) if int(duration) >= int(sdur): title = "[B]%s[/B]" % title + ret = "fill" break - PLog("return: " + title) + PLog("%s_return: %s" % (ret, title)) return title #----------------------------------------------------------------------- # class="b-playerbox in page auswerten (1 od. mehrere) diff --git a/changelog.txt b/changelog.txt index e9fbd38..d391271 100644 --- a/changelog.txt +++ b/changelog.txt @@ -10,6 +10,20 @@ CHANGE HISTORY max_col 97 -------------- +16.04.2023 4.6.9 + Audio_get_items_single (Audiothek): Fix img-Auswertung (fehlte: '"'). + get_page_content (ARD): neues Blockmerkmal nach ARD-Änderung (images). + seconds_translate (util): str(seconds) ergänzt für json-Werte. + ARDnew: Auswertung umgestellt (get_page_content -> get_json_content) für + ARDSearchnew, ARDStartRubrik, ARDRubriken, SendungenAZ_ARDnew, ARDStart, + ARDStartRegion, MainNEW, ARDVerpasstContent, ARDStartSingle. + menu_hub (TagesschauXL): get_page_content -> get_json_content + get_foruminfo (tools): Anpassung Forum-Hinweis an Forum-Update. + get_summary_pre (util): Code für ARDClassic entfernt (obsolet). + ARDSportWDR: Event NORDISCHE SKI-WM -> "Event-Archiv. + ARDSportMediaPlayer (ARDSportWDR): Datum Stream-Verfügbarkeit in Player- + Daten ergänzt (weggefallen in html-Quellen). + 09.04.2023 4.6.8 get_forms (funk): Anpassung int/float für Bandbreite , Sortierung via lambda-Funktion, Abgleich für Sofortststart umgestellt auf Auflösung diff --git a/resources/lib/ARDnew.py b/resources/lib/ARDnew.py index 0bbe100..0ef6709 100644 --- a/resources/lib/ARDnew.py +++ b/resources/lib/ARDnew.py @@ -1228,206 +1228,9 @@ def ARD_get_strmStream(url, title, img, Plot): # Seiten sind hier bereits senderspezifisch. # Aufrufe Direktsprünge # 07.04.2023 skip Subrubrik Übersicht (aktuelle Seite) -# -def get_page_content(li, page, ID, mark='', mehrzS=''): - PLog('get_page_content: ' + ID); PLog(mark) - ID_org=ID - - CurSender = Dict("load", 'CurSender') # Debug, Seite bereits senderspez. - sendername, sender, kanal, img, az_sender = CurSender.split(':') - PLog(sender) #-> href - - mediatype=''; pagetitle='' - pagination = stringextract('pagination":', '"type"', page) - if ID == "A-Z" or ID == "Search": - pagetitle = stringextract('title":"', '"', pagination)# bei Suche: SearchCompilationWidget:.. - PLog("pagetitle: " + pagetitle) - page = page.replace('\\"', '*') # quotierte Marks entf., Bsp. \"query\" - - if 'Livestream' in ID or 'EPG' in ID: - gridlist = blockextract('"broadcastedOn"', page) - else: - gridlist = blockextract( '"availableTo"', page) - if ID == 'Search': # Search immer Einzelbeiträge - mehrfach = False - #gridlist = blockextract( '"ondemand"', page) # ondemand: neuester Beitrag kann fehlen - else: - if len(gridlist) == 0: # Altern. - gridlist = blockextract('id":"Link:', page) # deckt auch Serien in Swiper ab - - if 'ARDStart' in ID: # zusätzl. 
Beiträge ganz links, Livestream - decorlist = blockextract( '"decor":', page) # möglich, s.u., umfasst ID ARDStartRubrik - PLog('decorlist: ' + str(len(decorlist))) - gridlist = gridlist + decorlist # 30.01.2022 (Filter href in skip_list) - - if len(gridlist) == 0: # Fallback (außer Livestreams) - #gridlist = blockextract( '"images":', page) # geändert 20.09.2019 - gridlist = blockextract( '"availableTo":', page) # geändert 10.11.2021 - if len(gridlist) == 0: # 09.01.2022 Fallback für A-Z-Inhalte - gridlist = blockextract( '"decor":', page) - if len(gridlist) == 0: # 12.04.2023 Fallback für Menü-Inhalte - gridlist = blockextract( '"images":', page) - - if len(gridlist) == 0: - msg1 = 'keine Beiträge gefunden' - PLog(msg1) - MyDialog(msg1, '', '') - PLog('gridlist: ' + str(len(gridlist))) - - skiplist=[] - for s in gridlist: - uhr=''; ID=ID_org; duration=''; - PLog("Mark10") - - mehrfach = True # Default weitere Rubriken - if 'target":{"id":"' in s: - targetID= stringextract('target":{"id":"', '"', s) # targetID, auch Search - else: - targetID= stringextract('id":"Link:', '"', s) # Serie in Swiper via ARDStartSingle - if targetID == "": - links = stringextract('target":', '}', s) - targetID= stringextract('href:"', '"', links) - PLog("targetID: " + targetID) - if targetID == '': # kein Video - continue - - PLog('"availableTo":null' in s) # kein Einzelbetrag - if '/compilation/' in s or '/grouping/' in s: # Serie Vorrang vor z.B. Teaser - mehrfach = True - if ID == 'EPG': - mehrfach = False - if '"duration":' in s: # Einzelbetrag - mehrfach = False - # Live-Stream od. -Aufzeichnung (Bsp. ARD Sport): - if 'type":"live"' in s or '"type":"event"' in s or 'Livestream' in ID: - mehrfach = False - - href='' - if mehrfach == True: # Pfad für Mehrfachbeiträge ermitteln - url_parts = ['/grouping/', '/compilation/', '/editorial/', '/page-gateway/pages/'] - hreflist = blockextract('"href":"', s) - #PLog("hreflist: " + str (hreflist)) - for h in hreflist: - for u in url_parts: - if u in h: - href = stringextract('"href":"', '"', h) - break - else: - hreflist = blockextract('"href":"', s) - for h in hreflist: - if 'embedded=true' in h: - href = stringextract('"href":"', '"', h) - break - # PLog("href: " + str (href)) - - title='' - if 'longTitle":"' in s: - title = stringextract('longTitle":"', '"', s) - if title == '': - title = stringextract('mediumTitle":"', '"', s) - if title == '': - title = stringextract('shortTitle":"', '"', s) - title = transl_json(title) # <1u002F2> = <1/2> - title = unescape(title); - title = repl_json_chars(title) - - if mehrzS: # Setting pref_more - title = u"Mehr: %s" % title - if mark == "Subrubriken": - if title.startswith("Übersicht"): # skip Subrubrik Übersicht (rekursiv, o. 
Icons) - continue - - if mark: - PLog(title); PLog(mark) - title = title.strip() - # title = make_mark(mark, title, "red") # farbige Markierung - title = make_mark(mark, title, "", bold=True) # farbige Markierung - - img = stringextract('src":"', '"', s) - img = img.replace('{width}', '640'); - img = img.replace('u002F', '/') - if img == "": # Subrubriken - img = R(ICON_DIR_FOLDER) - - summ='' - if ID != 'Livestream' and mehrfach == False:# mehrfach: summ=Folgeseiten - PLog("pre: %s" % s[:80]) # Verfügbar + Sendedatum aus s laden (nicht Zielseite) - summ = get_summary_pre(path='dummy', ID='ARDnew', skip_verf=False, skip_pubDate=False, page=s) - else: - summ = title - if "Sendedatum:" in summ: # aus Rückabe get_summary_pre - uhr = summ.split(' ')[-2] - - title = repl_json_chars(title); summ = repl_json_chars(summ); - # ARDVerpasstContent: Zeit im Titel, Langfass. tagline: - if ID == 'EPG' and uhr: - title = "[COLOR blue]%s[/COLOR] | %s" % (uhr, title) - pubServ = stringextract('publicationService":{"name":"', '"', s) # publicationService (Sender) - if pubServ: - summ = "%sSender: %s" % (summ, pubServ) - - PLog('Satz:'); - PLog(mehrfach); PLog(title); PLog(href); PLog(img); PLog(summ[:60]); PLog(ID) - - if href == '': - continue - if href in skiplist: - continue - skiplist.append(href) - - if SETTINGS.getSetting('pref_usefilter') == 'true': # Filter - filtered=False - for item in AKT_FILTER: - if up_low(item) in py2_encode(up_low(s)): - filtered = True - break - if filtered: - PLog('filtered: <%s> in %s ' % (item, title)) - continue - - if mehrfach: - summ = "Folgeseiten" - href=py2_encode(href); title=py2_encode(title); - fparams="&fparams={'path': '%s', 'title': '%s'}" % (quote(href), quote(title)) - addDir(li=li, label=title, action="dirList", dirID="resources.lib.ARDnew.ARDStartRubrik", fanart=img, thumb=img, - fparams=fparams, summary=summ, mediatype='') - else: - PLog("check_full_shows") # full_show im Titel: ganze Sendungen rot+fett - if ID != 'EPG' and ID != 'Search': # bei Suche Absturz nach Video-Sofortstart - if pagetitle == '': - if '"homepage":' in s: # Home-Titel kann fehlenden Sendungstitel enthalten - pagetitle = stringextract('"homepage":', '}', s) - pagetitle = stringextract('"title":"', '"', pagetitle) - title_samml = "%s|%s" % (title, pagetitle) # Titel + Seitentitel (A-Z, Suche) - duration = stringextract('duration":', ',', s) # sec-Wert - duration = seconds_translate(duration) # 0:15 - if SETTINGS.getSetting('pref_mark_full_shows') == 'true': - title = ardundzdf.full_shows(title, title_samml, summ, duration, "full_shows_ARD") - - if SETTINGS.getSetting('pref_load_summary') == 'true': # summary (Inhaltstext) im Voraus holen - summ_new = get_summary_pre(path=href, ID='ARDnew', duration=duration) # s.o. pre: - if summ_new: - summ = summ_new - - if SETTINGS.getSetting('pref_video_direct') == 'true': # Sofortstart? - mediatype='video' - - if '"type":"live"' in s or '"type":"event"' in s: # Livestream in Stage od. 
ARD Sport - ID = "Livestream" - summ = "%s | [B][COLOR red]Livestream[/COLOR][/B]" % summ - else: - ID=ID_org - - - summ_par = summ.replace('\n', '||') - href=py2_encode(href); title=py2_encode(title); summ_par=py2_encode(summ_par); - fparams="&fparams={'path': '%s', 'title': '%s', 'summary': '%s', 'ID': '%s'}" %\ - (quote(href), quote(title), quote(summ_par), ID) - addDir(li=li, label=title, action="dirList", dirID="resources.lib.ARDnew.ARDStartSingle", fanart=img, thumb=img, - fparams=fparams, summary=summ, mediatype=mediatype) - - return li - +# 14.04.2023 get_page_content -> get_json_content +# gelöscht: def get_page_content(li, page, ID, mark='', mehrzS=''): +# #--------------------------------------------------------------------------------------------------- # 14.04.2023 get_page_content -> get_json_content # @@ -1440,44 +1243,44 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): PLog(sender) #-> href mediatype=''; pagetitle='' + PLog(page[:80]) jsonpath = "teasers" # Default - if "SearchCompilationWidget" in page: # Suchergebnis - pagetitle = stringextract('CompilationWidget:', '"', page)# SearchCompilationWidget:.. - if page.startswith(u'{"aZContent"'): - pagetitle = stringextract('title":"', '"', page) # Buchstabe A, B.. if page.startswith(u'{"binaryFeatures"'): jsonpath = "widgets|0|teasers" - pagetitle = stringextract('title":"', '"', page) # Titel 1. image - PLog("pagetitle: " + pagetitle) - page = page.replace('\\"', '*') - PLog(page[:80]) try: - obs = json.loads(page) - PLog(len(obs)) - obs = GetJsonByPath(jsonpath, obs) + page_obs = json.loads(page) + PLog(len(page_obs)) + obs = GetJsonByPath(jsonpath, page_obs) except Exception as exception: PLog(str(exception)) obs=[] + if len(obs) == 0: # Altern. + jsonpath = "widgets|0|teasers" + try: + obs = GetJsonByPath(jsonpath, page_obs) + except Exception as exception: + PLog(str(exception)) + obs=[] PLog("obs: %d" % len(obs)) - typ_single_list = ["live", "event", "broadcastMainClip", # Einzelbeträge - "ondemand"] + # typ-Info Einzelbeträge: ["live", "event", "broadcastMainClip", + # "ondemand", "poster"] + for s in obs: PLog("Mark10") + PLog(str(s)[:60]) uhr=''; ID=ID_org; duration=''; summ=''; availableTo=''; matRat="Ohne" + typ = s["type"] + if "availableTo" in s: + availableTo = s["availableTo"] - try: - if "availableTo" in s: # fehlt u.a. bei EPG - availableTo = s["availableTo"] # Einzelbetrag - typ = s["type"] - mehrfach = False - except Exception as exception: - PLog(str(exception)) - - if typ not in typ_single_list: - mehrfach = True # Default weitere Rubriken + typ = s["type"] + if "duration" in s or "broadcastedOn" in s: # broadcastedOn: Livestream + mehrfach = False # Default Einzelbetrag + else: + mehrfach = True # Default weitere Rubriken try: imgsrc = s["images"]["aspect16x9"] @@ -1489,10 +1292,10 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): title = s["longTitle"] title = repl_json_chars(title) - if mark: + if mark: # Markierung Suchbegriff PLog(title); PLog(mark) title = title.strip() - title = make_mark(mark, title, "", bold=True) # Markierung + title = make_mark(mark, title, "", bold=True) # -> util if mehrzS: # Setting pref_more title = u"Mehr: %s" % title @@ -1523,14 +1326,14 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): if "show" in s: if s["show"]: # null? - summ = s["show"]["synopsis"] + summ = s["show"]["synopsis"] # Zusammenfassung PLog(summ[:60]) if summ == None: summ = "" summ = repl_json_chars(summ) - if typ != "live": - verf = availableTo # s.o. 
+ verf = availableTo # s.o. + if "live" not in typ: # nicht in Livestreams if verf == None: verf="" verf = time_translate(verf) @@ -1549,7 +1352,7 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): else: summ = title - if "Sendedatum:" in summ: # aus Rückabe get_summary_pre + if "Sendedatum:" in summ: uhr = summ.split(' ')[-2] summ = repl_json_chars(summ) @@ -1571,7 +1374,8 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): continue PLog('Satz:'); - PLog(mehrfach); PLog(title); PLog(href); PLog(img); PLog(summ[:60]); PLog(duration); + PLog(mehrfach); PLog(title); PLog(href); PLog(img); PLog(summ[:60]); + PLog(duration); PLog(availableTo); if mehrfach: summ = "Folgeseiten" @@ -1587,13 +1391,14 @@ def get_json_content(li, page, ID, mark='', mehrzS=''): pagetitle = stringextract('"title":"', '"', pagetitle) title_samml = "%s|%s" % (title, pagetitle) # Titel + Seitentitel (A-Z, Suche) if SETTINGS.getSetting('pref_mark_full_shows') == 'true': - if "duration" in s: - dur = s["duration"]; - dur = str(int(dur)) - title = ardundzdf.full_shows(title, title_samml, summ, dur, "full_shows_ARD") + if ID != "Search": # Vorrang Suchmarkierung vor full_shows + if "duration" in s: + dur = s["duration"]; + dur = str(int(dur)) + title = ardundzdf.full_shows(title, title_samml, summ, dur, "full_shows_ARD") if SETTINGS.getSetting('pref_load_summary') == 'true': # summary (Inhaltstext) im Voraus holen - summ_new = get_summary_pre(path=href, ID='ARDnew', duration=duration) # s.o. pre: + summ_new = get_summary_pre(path=href, ID='ARDnew', duration=duration) # Modul util if summ_new: summ = summ_new diff --git a/resources/lib/TagesschauXL.py b/resources/lib/TagesschauXL.py index b04f341..0f4ed9f 100644 --- a/resources/lib/TagesschauXL.py +++ b/resources/lib/TagesschauXL.py @@ -4,7 +4,7 @@ # Modul für für die Inhalte von tagesschau.de ################################################################################ # 7 # Numerierung für Einzelupdate -# Stand: 20.02.2023 +# Stand: 15.04.2023 # # Anpassung Python3: Modul future # Anpassung Python3: Modul kodi_six + manuelle Anpassungen @@ -45,7 +45,7 @@ # import ardundzdf reicht nicht für thread_getpic from ardundzdf import * # transl_wtag, get_query, thread_getpic, # ZDF_SlideShow, Parseplaylist, test_downloads -from resources.lib.ARDnew import get_page_content # ARD_bab +from resources.lib.ARDnew import get_json_content # ARD_bab from resources.lib.util import * @@ -262,6 +262,7 @@ def Main_XL(): # nun auf der Webseite als quoted json eingebettet, Direktsprung zu XLGetSourcesHTML # entfällt - Auswertung nun über vorgeschaltete Funktion XLSinglePage -> # XLGetSourcesJSON +# 15.04.2023 get_page_content -> get_json_content # def menu_hub(title, path, ID, img): PLog('menu_hub:') @@ -299,7 +300,7 @@ def menu_hub(title, path, ID, img): # if ID == 'ARD_bab': # 14.02.2023 umgestellt auf api mark=''; ID="XL_menu_hub" - li = get_page_content(li, page, ID, mark) # -> ARDnew + li = get_json_content(li, page, ID, mark) # -> ARDnew else: li = get_content(li, page, ID=ID, path=path) diff --git a/resources/lib/arte.py b/resources/lib/arte.py index fba839d..aa99bfb 100644 --- a/resources/lib/arte.py +++ b/resources/lib/arte.py @@ -1235,8 +1235,6 @@ def get_ArtePage(caller, title, path, header=''): if path == '': PLog("path_fehlt") return page - # Sicherung - jsonmark = '"props":' # json-Bereich 26.07.2021 angepasst page, msg = get_page(path, GetOnlyRedirect=True)# Permanent-Redirect-Url abfangen url = page diff --git a/resources/lib/my3Sat.py b/resources/lib/my3Sat.py 
index c0ca126..22e9cb4 100644 --- a/resources/lib/my3Sat.py +++ b/resources/lib/my3Sat.py @@ -442,7 +442,10 @@ def SendungenDatum(SendDate, title): tagline = title_org + ' | ' + zeit if dauer: tagline = tagline + ' | ' + dauer - + + if SETTINGS.getSetting('pref_load_summary') == 'true': # Inhaltstext im Voraus laden? + pass # o. Mehrwert zu descr + title = repl_json_chars(title); sendung = repl_json_chars(sendung) descr = unescape(descr); diff --git a/resources/lib/util.py b/resources/lib/util.py index 19b059a..5534865 100644 --- a/resources/lib/util.py +++ b/resources/lib/util.py @@ -12,7 +12,7 @@ # 17.11.2019 Migration Python3 Modul kodi_six + manuelle Anpassungen # # 45 # Numerierung für Einzelupdate -# Stand: 14.04.2023 +# Stand: 15.04.2023 # Python3-Kompatibilität: from __future__ import absolute_import @@ -2290,14 +2290,8 @@ def get_summary_pre(path,ID='ZDF',skip_verf=False,skip_pubDate=False,page='',pat PLog('get_summary_pre: ' + ID); PLog(path) PLog(skip_verf); PLog(skip_pubDate); PLog(len(page)) - if 'Video?bcastId' in path: # ARDClassic - fname = path.split('=')[-1] # ../&documentId=31984002 - fname = "ID_%s" % fname - else: - fname = path.split('/')[-1] - fname.replace('.html', '') # .html bei ZDF-Links abschneiden - if '?devicetype' in fname: # ARDNew-Zusatz: ?devicetype=pc&embedded=true - fname = fname.split('?devicetype')[0] + fname = path.split('/')[-1] + fname.replace('.html', '') # .html bei ZDF-Links abschneiden fpath = os.path.join(TEXTSTORE, fname) PLog('fpath: ' + fpath) @@ -2328,11 +2322,13 @@ def get_summary_pre(path,ID='ZDF',skip_verf=False,skip_pubDate=False,page='',pat verf=''; if pattern: - ID='' #einzelnes Auswertungsmerkmal + ID='' #einzelnes Auswertungsmerkmal pat1, pat2 = pattern.split('|') summ = stringextract(pat1, pat2, page) summ = repl_json_chars(summ) + #----------------- + if ID == 'ZDF' or ID == '3sat': teaserinfo = stringextract('teaser-info">', '<', page) summ = stringextract('description" content="', '"', page) @@ -2364,6 +2360,8 @@ def get_summary_pre(path,ID='ZDF',skip_verf=False,skip_pubDate=False,page='',pat else: summ = "%s%s" % (pubDate[3:], summ) + #----------------- + if ID == 'ARDnew': page = page.replace('\\"', '*') # Quotierung vor " entfernen, Bsp. \"query\" pubServ = stringextract('"name":"', '"', page) # publicationService (Sender) @@ -2401,73 +2399,6 @@ def get_summary_pre(path,ID='ZDF',skip_verf=False,skip_pubDate=False,page='',pat if duration and summ: summ = "%s\n%s" % (duration, summ) - - # für Classic ist u-Kennz. vor Umlaut-strings erforderlich - if ID == 'ARDClassic': - # summ = stringextract('description" content="', '"', page) # geändert 23.04.2019 - summ = stringextract('itemprop="description">', '<', page) - summ = unescape(summ) - summ = cleanhtml(summ) - summ = repl_json_chars(summ) - if skip_verf == False: - if u'verfügbar bis' in page: - verf = stringextract(u'verfügbar bis ', ' - if verf: # Verfügbar voranstellen - summ = u"[B]Verfügbar bis [COLOR darkgoldenrod]%s[/COLOR][/B]\n\n%s" % (verf, summ) - if skip_pubDate == False: - pubDate = stringextract('Video der Sendung vom', '', '<', page) - if '"mediaDate"' in page: - mediaDate = stringextract('mediaDate">', '<', page) - if '"mediaDuration"' in page: - mediaDuration = stringextract('mediaDuration">', '<', page) - if len(mediaDuration) >= 8: - mediaDuration = mediaDuration + "Std." - else: - mediaDuration = mediaDuration + "Min." 
- else: - mediaDuration = duration - if mediaDate: - duration = mediaDate - if mediaDuration: - duration = "%s | %s" % (mediaDate, mediaDuration) - - try: # todo: codec-Error einkreisen - if u'"mediaExpiry">' in page: - verf = stringextract(u'"mediaExpiry">', '<', page) - if verf: - verf = u"[B][COLOR darkgoldenrod]%s[/COLOR][/B]" % verf - duration = "%s | %s" % (duration, verf) - PLog("duration: " + duration) - - summ = stringextract('class="einleitung small">', '<', page) - if summ == '': - summ = stringextract('class="text">', '<', page) - if summ == '': - summ = stringextract('teasertext">', '', page) - if 'Falls JavaScript in Ihrem' in summ: - summ = '' - summ = unescape(summ); summ = mystrip(summ) - summ = cleanhtml(summ) - summ = repl_json_chars(summ) - #if u'"mediaTitle">' in page: # nicht verw. - # mtitle = stringextract(u'"mediaTitle">', '"', page) - summ = u"%s | %s\n\n%s" % (duration, mtitle, summ) - except Exception as exception: - PLog(str(exception)) - summ='' page = py2_encode(page) summ = summ.replace(' | | ', '') # Korrek. Leer
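
Note on the ARDnew rework: the core of this patch is the switch in resources/lib/ARDnew.py from text scraping (the deleted get_page_content) to decoding the page with json.loads and walking it with GetJsonByPath, using "teasers" as the default path and "widgets|0|teasers" as fallback. The real GetJsonByPath lives in resources/lib/util.py and is not shown in this diff; the sketch below only illustrates the assumed pipe-separated traversal and the duration/broadcastedOn test that get_json_content uses to tell single items ("Einzelbeitrag") from sub-rubrics ("mehrfach"). get_json_by_path and iter_teasers are illustrative names, not addon functions.

# Sketch only, not the addon code - assumed behaviour of the JSON traversal in get_json_content.
import json

def get_json_by_path(path, obj):
    # "widgets|0|teasers" -> obj["widgets"][0]["teasers"]
    for key in path.split("|"):
        if isinstance(obj, list):
            obj = obj[int(key)]
        else:
            obj = obj[key]
    return obj

def iter_teasers(page):
    page_obs = json.loads(page)
    try:
        teasers = get_json_by_path("teasers", page_obs)             # default path
    except (KeyError, IndexError, TypeError, ValueError):
        teasers = []
    if not teasers:                                                 # fallback, e.g. pages starting with "binaryFeatures"
        try:
            teasers = get_json_by_path("widgets|0|teasers", page_obs)
        except (KeyError, IndexError, TypeError, ValueError):
            teasers = []
    for s in teasers:
        # single item if it carries a duration or a broadcast date, otherwise a sub-rubric
        mehrfach = not ("duration" in s or "broadcastedOn" in s)
        yield s.get("longTitle", ""), s.get("availableTo", ""), mehrfach

page = '{"widgets":[{"teasers":[{"longTitle":"Sportschau","duration":5400}]}]}'
for title, avail, mehrfach in iter_teasers(page):
    print(title, avail, mehrfach)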
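
The changelog entry "seconds_translate (util): str(seconds) ergänzt für json-Werte" refers to the fact that the JSON teasers deliver duration as a number rather than a string, so the helper now coerces with str() before the string handling. A minimal sketch of that coercion, assuming the helper formats seconds as hours:minutes (the diff comment in get_json_content shows output like "0:15"); the exact formatting of the real function in util.py is not part of this patch.

# Sketch only - the real seconds_translate lives in resources/lib/util.py.
# Point of the 4.6.9 fix: JSON values arrive as int/float, the old code expected str.
def seconds_translate(seconds):
    seconds = str(seconds).strip()                      # added str() so numeric json values do not break
    if not seconds or not seconds.replace(".", "", 1).isdigit():
        return ""
    total = int(float(seconds))
    hours, rest = divmod(total, 3600)
    minutes = rest // 60
    return "%d:%02d" % (hours, minutes)

# usage as in get_json_content: s["duration"] may be 900 (int) or "900" (str)
print(seconds_translate(900))                           # 0:15
print(seconds_translate("5400"))                        # 1:30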
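
ARDSportMediaPlayer now reads the availability date from the player JSON (av_original_air_time) because it dropped out of the HTML sources, again via util.stringextract. The sketch below pairs a minimal stringextract equivalent with that new lookup; time_translate is only stubbed here, since its real output format is defined in util.py and not in this hunk.

# Sketch only - behaviour of util.stringextract as used throughout this patch.
def stringextract(pat1, pat2, text):
    start = text.find(pat1)
    if start < 0:
        return ""
    start += len(pat1)
    end = text.find(pat2, start)
    if end < 0:
        return ""
    return text[start:end]

def time_translate(iso_like):
    # placeholder formatting only; the real helper is in resources/lib/util.py
    return iso_like.replace("T", " ")[:16]

cont = '{"av_original_air_time":"2023-04-15T18:30:00","chapter1":"Sportschau"}'
avail = stringextract('av_original_air_time":"', '"', cont)
verf = time_translate(avail) if avail else ""
tag = "Dauer 1:30"
if verf:
    tag = u"%s | Verfügbar ab [B]%s[/B]" % (tag, verf)
print(tag)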