commit python-fanficfare for openSUSE:Factory
Hello community,

here is the log from the commit of package python-fanficfare for openSUSE:Factory checked in at 2019-04-28 20:15:27
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-fanficfare (Old)
 and /work/SRC/openSUSE:Factory/.python-fanficfare.new.5536 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-fanficfare"

Sun Apr 28 20:15:27 2019 rev:7 rq:698710 version:3.7.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-fanficfare/python-fanficfare.changes 2019-03-26 22:31:45.241715462 +0100
+++ /work/SRC/openSUSE:Factory/.python-fanficfare.new.5536/python-fanficfare.changes 2019-04-28 20:15:41.750334864 +0200
@@ -1,0 +2,27 @@
+Sun Apr 28 07:26:14 CEST 2019 - Matej Cepl <mcepl@suse.com>
+
+- Update to 3.7.0:
+  - Update translations
+  - Revert "Remove defunct site www.destinysgateway.com"
+  - Recognize destinysgateway.com and www.destinysgateway.com
+  - Remove ncisfic.com -- moved to AO3.
+  - Fix date format for adapter_gluttonyfictioncom
+  - Comment out some old debugs.
+  - Add debug output for encoding used.
+  - Fix some comments.
+  - Add another StoryDoesNotExist string for
+    adapter_fanfictionnet
+  - Closes #390 - RoyalRoad click link in emails.
+  - Update a comment link in setup.py.
+  - Include status 'Hiatus' for adapter_royalroadcom.
+  - Correct a comment in defaults.ini.
+  - Fix for saved custom column metadata and boolean values.
+  - Add bookmarked site specific metadata for
+    adapter_archiveofourownorg.
+  - Fix for PI saved metadata not reading False & empty strings.
+  - More fixing for bool metadata values--convert to string when
+    set.
+  - Add remove_authorfootnotes_on_update feature for AO3.
+  - Update Translations.
+
+-------------------------------------------------------------------

Old:
----
FanFicFare-3.6.0.tar.gz

New:
----
FanFicFare-3.7.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-fanficfare.spec ++++++
--- /var/tmp/diff_new_pack.qF54RS/_old 2019-04-28 20:15:42.186334593 +0200
+++ /var/tmp/diff_new_pack.qF54RS/_new 2019-04-28 20:15:42.186334593 +0200
@@ -20,7 +20,7 @@
 %define modnamedown fanficfare
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name: python-fanficfare
-Version: 3.6.0
+Version: 3.7.0
 Release: 0
 Summary: Tool for making eBooks from stories on fanfiction and other web sites
 License: GPL-3.0-only

++++++ FanFicFare-3.6.0.tar.gz -> FanFicFare-3.7.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/__init__.py new/FanFicFare-3.7.0/calibre-plugin/__init__.py
--- old/FanFicFare-3.6.0/calibre-plugin/__init__.py 2019-03-12 16:28:29.000000000 +0100
+++ new/FanFicFare-3.7.0/calibre-plugin/__init__.py 2019-04-19 21:08:56.000000000 +0200
@@ -33,7 +33,7 @@
 from calibre.customize import InterfaceActionBase

 # pulled out from FanFicFareBase for saving in prefs.py
-__version__ = (3, 6, 0)
+__version__ = (3, 7, 0)

 ## Apparently the name for this class doesn't matter--it was still
 ## 'demo' for the first few versions.
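The boolean-metadata entries in the changelog above ("Fix for saved custom column metadata and boolean values", "More fixing for bool metadata values--convert to string when set") correspond to the fff_plugin.py and story.py hunks further down. As a rough standalone illustration only -- not the plugin's actual code, and the helper name parse_tristate_bool is made up for this example -- the conversion amounts to mapping saved strings onto calibre's tri-state bool columns; the accepted strings match the fff_plugin.py hunk that follows:

    # Sketch only; accepted strings taken from the fff_plugin.py hunk below.
    def parse_tristate_bool(val):
        """Map a saved metadata string to True/False/None (Yes/No/Null)."""
        if val is None or val == '':
            return None
        s = str(val).lower()
        if s in ('t', 'true', '1', 'yes', 'y'):
            return True
        if s in ('f', 'false', '0', 'no', 'n'):
            return False
        return None  # tri-state 'boolean' columns also allow Null

    # e.g. parse_tristate_bool('Yes') -> True, parse_tristate_bool('') -> None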
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/fff_plugin.py new/FanFicFare-3.7.0/calibre-plugin/fff_plugin.py --- old/FanFicFare-3.6.0/calibre-plugin/fff_plugin.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/fff_plugin.py 2019-04-19 21:08:56.000000000 +0200 @@ -2139,15 +2139,15 @@ val = unicode(val).replace(",","") else: val = val + if coldef['datatype'] == 'bool': + if val.lower() in ('t','true','1','yes','y'): + val = True + elif val.lower() in ('f','false','0','no','n'): + val = False + else: + val = None # for tri-state 'booleans'. Yes/No/Null + # logger.debug("setting 'r' or 'added':meta:%s label:%s val:%s"%(meta,label,val)) if val != '': - if coldef['datatype'] == 'bool': - if val.lower() in ('t','true','1','yes','y'): - val = True - elif val.lower() in ('f','false','0','no','n'): - val = False - else: - val = None # for tri-state 'booleans'. Yes/No/Null - #print("setting 'r' or 'added':%s"%val) self.set_custom(db, book_id, meta, val, label=label, commit=False) if flag == 'a': diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/jobs.py new/FanFicFare-3.7.0/calibre-plugin/jobs.py --- old/FanFicFare-3.6.0/calibre-plugin/jobs.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/jobs.py 2019-04-19 21:08:56.000000000 +0200 @@ -117,8 +117,8 @@ from calibre_plugins.fanficfare_plugin import FanFicFareBase fffbase = FanFicFareBase(options['plugin_path']) - with fffbase: - + with fffbase: # so the sys.path was modified while loading the + # plug impl. from calibre_plugins.fanficfare_plugin.dialogs import (NotGoingToDownload, OVERWRITE, OVERWRITEALWAYS, UPDATE, UPDATEALWAYS, ADDNEW, SKIP, CALIBREONLY, CALIBREONLYSAVECOL) from calibre_plugins.fanficfare_plugin.fanficfare import adapters, writers, exceptions @@ -128,7 +128,7 @@ try: ## No need to download at all. Can happen now due to - ## collision moving into bookfor CALIBREONLY changing to + ## collision moving into book for CALIBREONLY changing to ## ADDNEW when story URL not in library. if book['collision'] in (CALIBREONLY, CALIBREONLYSAVECOL): logger.info("Skipping CALIBREONLY 'update' down inside worker") diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/plugin-defaults.ini new/FanFicFare-3.7.0/calibre-plugin/plugin-defaults.ini --- old/FanFicFare-3.6.0/calibre-plugin/plugin-defaults.ini 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/plugin-defaults.ini 2019-04-19 21:08:56.000000000 +0200 @@ -192,7 +192,7 @@ ## calibre_author: calibre_author_LIST=>^(.{,100}).*$=>\1 ## ## You can 'split' one list item into multiple list entries by using -## \' in the replacement string. +## \, in the replacement string. ## ## Examples: #replace_metadata: @@ -1120,7 +1120,7 @@ ## entry) as the composite offreeformtags, ao3categories in ## include_in_genre. If there's ever more than 4 series, add ## series04,series04Url etc. 
-extra_valid_entries:fandoms,freeformtags,freefromtags,ao3categories,comments,kudos,hits,bookmarks,collections,byline,bookmarktags,bookmarksummary,bookmarkprivate,bookmarkrec,series00,series01,series02,series03,series00Url,series01Url,series02Url,series03Url,series00HTML,series01HTML,series02HTML,series03HTML +extra_valid_entries:fandoms,freeformtags,freefromtags,ao3categories,comments,kudos,hits,bookmarks,collections,byline,bookmarked,bookmarktags,bookmarksummary,bookmarkprivate,bookmarkrec,series00,series01,series02,series03,series00Url,series01Url,series02Url,series03Url,series00HTML,series01HTML,series02HTML,series03HTML fandoms_label:Fandoms freeformtags_label:Freeform Tags freefromtags_label:Freeform Tags @@ -1133,6 +1133,7 @@ bookmarks_label:Bookmarks ## Tags & Summary from *your* bookmark on the story. Only collected ## if always_login:true +bookmarked_label:I Bookmarked Story bookmarktags_label:My Bookmark Tags bookmarksummary_label:My Bookmark Summary bookmarkprivate_label:My Bookmark Private @@ -1163,7 +1164,7 @@ include_in_freefromtags:freeformtags ## adds to titlepage_entries instead of replacing it. -#extra_titlepage_entries: fandoms,freeformtags,ao3categories,comments,kudos,hits,bookmarks,bookmarktags,bookmarksummary,series01HTML,series02HTML,series03HTML,byline +#extra_titlepage_entries: fandoms,freeformtags,ao3categories,comments,kudos,hits,bookmarks,bookmarked,bookmarktags,bookmarksummary,series01HTML,series02HTML,series03HTML,byline ## adds to include_subject_tags instead of replacing it. #extra_subject_tags:fandoms,freeformtags,ao3categories @@ -1174,6 +1175,17 @@ ## personal.ini and list the ones you don't want. #exclude_notes:authorheadnotes,chaptersummary,chapterheadnotes,chapterfootnotes,authorfootnotes,inspiredlinks +## AO3 authorfootnotes and inspiredlinks end up in the 'last' chapter, +## but if updated, then there's a new 'last' chapter, leaving multiple +## copies. This option applies the 'skip_on_ffdl_update' class to +## those tags which means they will be removed during epub reading for +## update. This will only effect chapters added after turning the +## setting on. +## Result: Only the last chapter will have end notes. +## Side-effect: An 'Update Always' that doesn't add a new last +## chapter will also remove the end notes. +#remove_authorfootnotes_on_update:false + ## AO3 is blocking people more aggressively. If you download fewer ## stories less often you can likely get by with reducing this sleep. slow_down_sleep_time:2 @@ -1742,12 +1754,6 @@ ## Site dedicated to these categories/characters/ships extracategories:The Office -[ncisfic.com] -## Site dedicated to these categories/characters/ships -extracategories:NCIS - -website_encodings:Windows-1252,utf8 - [nfacommunity.com] ## Some sites do not require a login, but do require the user to ## confirm they are adult for adult content. In commandline version, @@ -2173,6 +2179,14 @@ ## Some sites do not require a login, but do require the user to ## confirm they are adult for adult content. In commandline version, ## this should go in your personal.ini, not defaults.ini. +#is_adult:true + +website_encodings:Windows-1252,utf8 + +[www.destinysgateway.com] +## Some sites do not require a login, but do require the user to +## confirm they are adult for adult content. In commandline version, +## this should go in your personal.ini, not defaults.ini. 
#is_adult:true website_encodings:Windows-1252,utf8 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/translations/ca.po new/FanFicFare-3.7.0/calibre-plugin/translations/ca.po --- old/FanFicFare-3.6.0/calibre-plugin/translations/ca.po 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/translations/ca.po 2019-04-19 21:08:56.000000000 +0200 @@ -2,7 +2,7 @@ # Copyright (C) YEAR ORGANIZATION # # Translators: -# Adolfo Jayme-Barrientos, 2014 +# Fito JB, 2014 # jmontane, 2014 # Queralt Iglesias <queralt.ig92@gmail.com>, 2016 # Robert Antoni Buj Gelonch <rbuj@fedoraproject.org>, 2016-2017 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/translations/de.po new/FanFicFare-3.7.0/calibre-plugin/translations/de.po --- old/FanFicFare-3.6.0/calibre-plugin/translations/de.po 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/translations/de.po 2019-04-19 21:08:56.000000000 +0200 @@ -5,6 +5,7 @@ # Ettore Atalan <atalanttore@googlemail.com>, 2014-2016,2018 # ILB, 2014-2017 # jumo, 2016 +# Patrick Wacker <crayzyone@gmail.com>, 2019 # Sebastian Keller <Haggard@gmx.de>, 2015 # Simon_Schuette <simonschuette@arcor.de>, 2014-2016 # Simon S, 2015 @@ -14,8 +15,8 @@ msgstr "" "Project-Id-Version: calibre-plugins\n" "POT-Creation-Date: 2018-12-25 23:36+Central Standard Time\n" -"PO-Revision-Date: 2018-12-26 19:43+0000\n" -"Last-Translator: Ettore Atalan <atalanttore@googlemail.com>\n" +"PO-Revision-Date: 2019-04-04 23:23+0000\n" +"Last-Translator: Patrick Wacker <crayzyone@gmail.com>\n" "Language-Team: German (http://www.transifex.com/calibre/calibre-plugins/language/de/)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" @@ -72,21 +73,21 @@ #: common_utils.py:527 msgid "Save setting for this plugin" -msgstr "" +msgstr "Speichern Sie die Einstellung für dieses Plugin" #: common_utils.py:555 msgid "" "Are you sure you want to edit settings in this library for this plugin?" -msgstr "" +msgstr "Möchten Sie die Einstellungen in dieser Bibliothek für dieses Plugin wirklich bearbeiten?" #: common_utils.py:556 msgid "The FanFicFare team does not support hand edited configurations." -msgstr "" +msgstr "Das FanFicFare-Team unterstützt keine von Hand bearbeiteten Konfigurationen." #: common_utils.py:564 msgid "" "Are you sure you want to save this setting in this library for this plugin?" -msgstr "" +msgstr "Möchten Sie diese Einstellung in dieser Bibliothek wirklich für dieses Plugin speichern?" #: common_utils.py:565 common_utils.py:593 msgid "" @@ -100,7 +101,7 @@ #: common_utils.py:574 msgid "All settings for this plugin in this library have been saved." -msgstr "" +msgstr "Alle Einstellungen für dieses Plugin in dieser Bibliothek wurden gespeichert." #: common_utils.py:575 common_utils.py:604 msgid "Please restart calibre now." @@ -275,7 +276,7 @@ msgid "" "Warn you if an update will change the URL of an existing book(normally automatic and silent).\n" "URLs may be changed from http to https silently if the site changed." -msgstr "" +msgstr "Warnt Sie, wenn ein Update die URL eines vorhandenen Buchs ändert (normalerweise automatisch und stumm).\nURLs können unbemerkt von http in https geändert werden, wenn sich die Seite geändert hat." #: config.py:519 msgid "Search inside ebooks for Story URL?" 
@@ -408,7 +409,7 @@ "When checking <i>If Story Already Exists</i> FanFicFare will first match by " "URL Identifier. But if not found, it can also search existing books by " "Title and Author(s)." -msgstr "" +msgstr "Beim Überprüfen, <i>ob die Story bereits vorhanden ist</i>, wird FanFicFare zuerst anhand der URL-Kennung abgeglichen. Wird er nicht gefunden, können vorhandene Bücher auch nach Titel und Autor (en) durchsucht werden." #: config.py:604 msgid "Reject List" @@ -640,7 +641,7 @@ msgid "" "Menu option to remove from \"To Read\" lists will also remove \"(new)\" " "chapter marks created by personal.ini <i>mark_new_chapters</i> setting." -msgstr "" +msgstr "Die Menüoption zum Entfernen aus \"To Read\" -Listen entfernt auch \"(neu)\" Kapitelmarkierung, die mit der Einstellung \"personal.ini mark_new_chapters\" erstellt wurden." #: config.py:947 msgid "" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/translations/es.po new/FanFicFare-3.7.0/calibre-plugin/translations/es.po --- old/FanFicFare-3.6.0/calibre-plugin/translations/es.po 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/translations/es.po 2019-04-19 21:08:56.000000000 +0200 @@ -2,7 +2,7 @@ # Copyright (C) YEAR ORGANIZATION # # Translators: -# Adolfo Jayme-Barrientos, 2014 +# Fito JB, 2014 # Albert, 2016 # Darío Hereñú <magallania@gmail.com>, 2015-2016,2018 # Darío Hereñú <magallania@gmail.com>, 2018 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/translations/et.po new/FanFicFare-3.7.0/calibre-plugin/translations/et.po --- old/FanFicFare-3.6.0/calibre-plugin/translations/et.po 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/translations/et.po 2019-04-19 21:08:56.000000000 +0200 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: calibre-plugins\n" "POT-Creation-Date: 2018-12-25 23:36+Central Standard Time\n" -"PO-Revision-Date: 2019-03-03 19:13+0000\n" +"PO-Revision-Date: 2019-04-13 19:57+0000\n" "Last-Translator: Maidur\n" "Language-Team: Estonian (http://www.transifex.com/calibre/calibre-plugins/language/et/)\n" "MIME-Version: 1.0\n" @@ -19,7 +19,7 @@ #: __init__.py:51 msgid "UI plugin to download FanFiction stories from various sites." -msgstr "Kasutajaliidese plugin mitmetelt saitidelt 'FanFiction'-juttude allalaadimiseks." +msgstr "Kasutajaliidese plugin mitmetelt saitidelt fännikirjanduse juttude allalaadimiseks." #: __init__.py:124 msgid "" @@ -149,7 +149,7 @@ msgid "" "These settings control the basic features of the plugin--downloading " "FanFiction." -msgstr "Need sätted juhivad plugina põhilisi funktsioone -- FanFictioni allalaadimist." +msgstr "Need sätted juhivad plugina põhilisi funktsioone -- fännikirjanduse allalaadimist." #: config.py:432 msgid "Defaults Options on Download" @@ -1511,7 +1511,7 @@ #: dialogs.py:1175 msgid "Delete Books (including books without FanFiction URLs)?" -msgstr "Kustutada raamatud (k.a ilma FanFiction URLita raamatud)?" +msgstr "Kustutada raamatud (k.a ilma fännikirjanduse URLita raamatud)?" #: dialogs.py:1176 msgid "Delete the selected books after adding them to the Rejected URLs list." 
@@ -1569,7 +1569,7 @@ #: fff_plugin.py:138 msgid "Download FanFiction stories from various web sites" -msgstr "Laadi erinevatelt veebisaitidelt alla FanFiction-jutte" +msgstr "Laadi erinevatelt veebisaitidelt alla fännikirjanduse jutte" #: fff_plugin.py:299 msgid "&Download from URLs" @@ -1577,11 +1577,11 @@ #: fff_plugin.py:301 msgid "Download FanFiction Books from URLs" -msgstr "Laadi FanFiction-raamatuid alla URLidelt" +msgstr "Laadi fännikirjanduse raamatuid alla URLidelt" #: fff_plugin.py:304 msgid "&Update Existing FanFiction Books" -msgstr "Uuenda olemasolevaid FanFiction-raamatuid" +msgstr "Uuenda olemasolevaid fännikirjanduse raamatuid" #: fff_plugin.py:309 msgid "Get Story URLs from &Email" @@ -1597,7 +1597,7 @@ #: fff_plugin.py:321 msgid "Make FanFiction Anthology Epub from URLs" -msgstr "Loo URLidest FanFiction antoloogia EPUB" +msgstr "Loo URLidest fännikirjanduse antoloogia EPUB" #: fff_plugin.py:324 msgid "Make Anthology Epub from Web Page" @@ -1605,7 +1605,7 @@ #: fff_plugin.py:326 msgid "Make FanFiction Anthology Epub from Web Page" -msgstr "Loo FanFiction antoloogia EPUB veebilehelt" +msgstr "Loo fännikirjanduse antoloogia EPUB veebilehelt" #: fff_plugin.py:329 msgid "Update Anthology Epub" @@ -1613,7 +1613,7 @@ #: fff_plugin.py:331 msgid "Update FanFiction Anthology Epub" -msgstr "Uuenda FanFictioni antoloogia EPUBi" +msgstr "Uuenda fännikirjanduse antoloogia EPUBi" #: fff_plugin.py:338 msgid "Mark Unread: Add to \"To Read\" and \"Send to Device\" Lists" @@ -2068,7 +2068,7 @@ #: fff_plugin.py:1545 msgid "Download %s FanFiction Book(s)" -msgstr "Laadi alla %s FanFiction raamat(ut)" +msgstr "Laadi alla %s fännikirjanduse raamat(ut)" #: fff_plugin.py:1552 msgid "Starting %d FanFicFare Downloads" @@ -2236,7 +2236,7 @@ #: jobs.py:74 msgid "Downloading FanFiction Stories" -msgstr "FanFiction-juttude allalaadimine" +msgstr "Fännikirjanduse juttude allalaadimine" #: jobs.py:90 msgid "%d of %d stories finished downloading" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/calibre-plugin/translations/pt_BR.po new/FanFicFare-3.7.0/calibre-plugin/translations/pt_BR.po --- old/FanFicFare-3.6.0/calibre-plugin/translations/pt_BR.po 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/calibre-plugin/translations/pt_BR.po 2019-04-19 21:08:56.000000000 +0200 @@ -6,13 +6,14 @@ # Paulo_Neto <layoutbr@lexxa.com.br>, 2014-2015,2017 # Patricia Tufanetto <ptufanetto@gmail.com>, 2018 # Paulo Márcio da Hora <paulomhora@gmail.com>, 2019 +# Thiago Feldhaus <thiagofcf@gmail.com>, 2019 # Wagner Marques <wagnermarques00@hotmail.com>, 2015 msgid "" msgstr "" "Project-Id-Version: calibre-plugins\n" "POT-Creation-Date: 2018-12-25 23:36+Central Standard Time\n" -"PO-Revision-Date: 2019-01-10 08:31+0000\n" -"Last-Translator: Paulo Márcio da Hora <paulomhora@gmail.com>\n" +"PO-Revision-Date: 2019-04-10 16:02+0000\n" +"Last-Translator: Thiago Feldhaus <thiagofcf@gmail.com>\n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/calibre/calibre-plugins/language/pt_BR/)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" @@ -69,7 +70,7 @@ #: common_utils.py:527 msgid "Save setting for this plugin" -msgstr "" +msgstr "Salvar configuração para este plugin" #: common_utils.py:555 msgid "" @@ -493,7 +494,7 @@ #: config.py:718 msgid "personal.ini" -msgstr "" +msgstr "personal.ini" #: config.py:725 config.py:829 config.py:830 msgid "Edit personal.ini" @@ -517,7 +518,7 @@ #: config.py:749 msgid "defaults.ini" -msgstr "" 
+msgstr "defaults.ini" #: config.py:754 msgid "" @@ -531,7 +532,7 @@ #: config.py:766 msgid "Calibre Columns" -msgstr "" +msgstr "Calibre Colunas" #: config.py:773 msgid "" @@ -552,7 +553,7 @@ #: config.py:788 msgid "Show Calibre Column Names" -msgstr "" +msgstr "Mostrar nome de Colunas Calibre" #: config.py:797 msgid "" @@ -671,7 +672,7 @@ #: config.py:1009 msgid "Plugin %(gc)s" -msgstr "" +msgstr "Plugin %(gc)s" #: config.py:1010 msgid "Use plugin to create covers. Additional settings are below." @@ -711,7 +712,7 @@ #: config.py:1046 msgid "%(gc)s(Plugin) Settings" -msgstr "" +msgstr "%(gc)s(Plugin) Configurações" #: config.py:1054 msgid "" @@ -988,7 +989,7 @@ #: config.py:1398 msgid "Save All Errors" -msgstr "" +msgstr "Salvar Todos os Erros" #: config.py:1399 msgid "If unchecked, these errors will not be saved: %s" @@ -1577,7 +1578,7 @@ #: fff_plugin.py:299 msgid "&Download from URLs" -msgstr "" +msgstr "&Download de URLs" #: fff_plugin.py:301 msgid "Download FanFiction Books from URLs" diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/__init__.py new/FanFicFare-3.7.0/fanficfare/adapters/__init__.py --- old/FanFicFare-3.6.0/fanficfare/adapters/__init__.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/__init__.py 2019-04-19 21:08:56.000000000 +0200 @@ -65,10 +65,10 @@ from . import adapter_dokugacom from . import adapter_iketernalnet from . import adapter_storiesofardacom +from . import adapter_destinysgatewaycom from . import adapter_ncisfictioncom from . import adapter_fanfiktionde from . import adapter_ponyfictionarchivenet -from . import adapter_ncisficcom from . import adapter_themasquenet from . import adapter_pretendercentrecom from . import adapter_darksolaceorg diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_archiveofourownorg.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_archiveofourownorg.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_archiveofourownorg.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_archiveofourownorg.py 2019-04-19 21:08:56.000000000 +0200 @@ -189,6 +189,10 @@ # actual login so we don't have a case where these show up # for a user only when they get user-restricted stories. try: + # is bookmarked if has update /bookmarks/ form -- + # create bookmark form uses different url + self.story.setMetadata('bookmarked', + None != metasoup.find('form',action=re.compile(r'^/bookmarks/'))) self.story.extendList('bookmarktags', metasoup.find('input',id='bookmark_tag_string')['value'].split(', ')) self.story.setMetadata('bookmarkprivate', @@ -418,6 +422,7 @@ new_tag = save_chapter_soup.new_tag(tag) new_tag.string=string elem.append(new_tag) + return new_tag ## These are the over-all work's 'Notes at the beginning'. ## They only appear on the first chapter in individual chapter @@ -472,6 +477,7 @@ append_tag(save_chapter,'b',"Notes for the Chapter:") save_chapter.append(chapfoot) + skip_on_update_tags = [] ## These are the over-all work's 'Notes at the end'. ## They only appear on the last chapter in individual chapter ## pages and after chapter-# div. 
Appending removes @@ -482,9 +488,15 @@ if footnotes != None: footnotes = footnotes.find('blockquote') if footnotes: - append_tag(save_chapter,'b',"Author's Note:") + b = append_tag(save_chapter,'b',"Author's Note:") + skip_on_update_tags.append(b) + skip_on_update_tags.append(footnotes) save_chapter.append(footnotes) + ## It looks like 'Inspired by' links now all appear in the ul + ## class=associations tag in authorheadnotes. This code is + ## left in case I'm wrong and there are still stories with div + ## id=children inspired links at the end. if 'inspiredlinks' not in exclude_notes and index+1 == self.num_chapters(): inspiredlinks = whole_dl_soup.find('div', {'id' : "children"}) if inspiredlinks != None: @@ -494,6 +506,22 @@ for alink in inspiredlinks.find_all('a'): if 'http' not in alink['href']: alink['href']='https://' + self.getSiteDomain() + alink['href'] + skip_on_update_tags.append(inspiredlinks) save_chapter.append(inspiredlinks) + ## AO3 story end notes end up in the 'last' chapter, but if + ## updated, then there's a new 'last' chapter. This option + ## applies the 'skip_on_ffdl_update' class to those tags which + ## means they will be removed during epub reading for update. + ## Results: only the last chapter will have end notes. + ## Side-effect: An 'Update Always' that doesn't add a new + ## lasts chapter will remove the end notes. + if self.getConfig("remove_authorfootnotes_on_update"): + for skip_tag in skip_on_update_tags: + if skip_tag.has_attr('class'): + skip_tag['class'].append('skip_on_ffdl_update') + else: + skip_tag['class']=['skip_on_ffdl_update'] + # logger.debug(skip_tag) + return self.utf8FromSoup(url,save_chapter) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_destinysgatewaycom.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_destinysgatewaycom.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_destinysgatewaycom.py 1970-01-01 01:00:00.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_destinysgatewaycom.py 2019-04-19 21:08:56.000000000 +0200 @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- + +# Copyright 2012 Fanficdownloader team, 2018 FanFicFare team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Software: eFiction +from __future__ import absolute_import +import logging +logger = logging.getLogger(__name__) +import re +from ..htmlcleanup import stripHTML +from .. import exceptions as exceptions + +# py2 vs py3 transition +from ..six import text_type as unicode +from ..six.moves.urllib.error import HTTPError + +from .base_adapter import BaseSiteAdapter, makeDate + +def getClass(): + return DestinysGatewayComAdapter + +# Class name has to be unique. Our convention is camel case the +# sitename with Adapter at the end. www is skipped. 
+class DestinysGatewayComAdapter(BaseSiteAdapter): + + def __init__(self, config, url): + BaseSiteAdapter.__init__(self, config, url) + + self.username = "NoneGiven" # if left empty, site doesn't return any message at all. + self.password = "" + self.is_adult=False + + # get storyId from url--url validation guarantees query is only sid=1234 + self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1]) + + + # normalized story URL. + self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?sid='+self.story.getMetadata('storyId')) + + # Each adapter needs to have a unique site abbreviation. + self.story.setMetadata('siteabbrev','dgrfa') + + # The date format will vary from site to site. + # http://docs.python.org/library/datetime.html#strftime-strptime-behavior + self.dateformat = "%b %d %Y" + + @staticmethod # must be @staticmethod, don't remove it. + def getSiteDomain(): + # The site domain. Does have www here, if it uses it. + return 'www.destinysgateway.com' + + @classmethod + def getSiteExampleURLs(cls): + return "http://"+cls.getSiteDomain()+"/viewstory.php?sid=1234" + + def getSiteURLPattern(self): + return re.escape("http://"+self.getSiteDomain()+"/viewstory.php?sid=").replace("www\.",r"(www\.)?")+r"\d+$" + + + ## Getting the chapter list and the meta data, plus 'is adult' checking. + def extractChapterUrlsAndMetadata(self): + + if self.is_adult or self.getConfig("is_adult"): + # Weirdly, different sites use different warning numbers. + # If the title search below fails, there's a good chance + # you need a different number. print data at that point + # and see what the 'click here to continue' url says. + addurl = "&warning=4" + else: + addurl="" + + # index=1 makes sure we see the story chapter index. Some + # sites skip that for one-chapter stories. + url = self.url+'&index=1'+addurl + logger.debug("URL: "+url) + + try: + data = self._fetchUrl(url) + except HTTPError as e: + if e.code == 404: + raise exceptions.StoryDoesNotExist(self.url) + else: + raise e + + m = re.search(r"'viewstory.php\?sid=\d+((?:&ageconsent=ok)?&warning=\d+)'",data) + if m != None: + if self.is_adult or self.getConfig("is_adult"): + # We tried the default and still got a warning, so + # let's pull the warning number from the 'continue' + # link and reload data. + addurl = m.group(1) + # correct stupid & error in url. + addurl = addurl.replace("&","&") + url = self.url+'&index=1'+addurl + logger.debug("URL 2nd try: "+url) + + try: + data = self._fetchUrl(url) + except HTTPError as e: + if e.code == 404: + raise exceptions.StoryDoesNotExist(self.url) + else: + raise e + else: + raise exceptions.AdultCheckRequired(self.url) + + if "Access denied. This story has not been validated by the adminstrators of this site." in data: + raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.") + + # use BeautifulSoup HTML parser to make everything easier to find. + soup = self.make_soup(data) + # print data + + # Now go hunting for all the meta data and the chapter list. + + ## Title + a = soup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$")) + self.story.setMetadata('title',stripHTML(a)) + + # Find authorid and URL from... author url. 
+ a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+")) + self.story.setMetadata('authorId',a['href'].split('=')[1]) + self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href']) + self.story.setMetadata('author',a.string) + + # Find the chapters: + for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")): + # just in case there's tags, like <i> in chapter titles. + self.add_chapter(chapter,'http://'+self.host+'/'+chapter['href']+addurl) + + + # eFiction sites don't help us out a lot with their meta data + # formating, so it's a little ugly. + + # utility method + def defaultGetattr(d,k): + try: + return d[k] + except: + return "" + + # <span class="label">Rated:</span> NC-17<br /> etc + labels = soup.findAll('span',{'class':'label'}) + for labelspan in labels: + value = labelspan.nextSibling + label = labelspan.string + + if 'Summary' in label: + ## Everything until the next span class='label' + svalue = "" + while value and 'label' not in defaultGetattr(value,'class'): + svalue += unicode(value) + value = value.nextSibling + self.setDescription(url,svalue) + #self.story.setMetadata('description',stripHTML(svalue)) + + if 'Rated' in label: + self.story.setMetadata('rating', value) + + if 'Word count' in label: + self.story.setMetadata('numWords', value) + + if 'Categories' in label: + cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories')) + for cat in cats: + self.story.addToList('category',cat.string) + + if 'Genre' in label: + genres = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=1')) + for genre in genres: + self.story.addToList('genre',genre.string) + + if 'Warnings' in label: + warnings = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=class&type_id=2')) + for warning in warnings: + self.story.addToList('warnings',warning.string) + + if 'Completed' in label: + if 'Yes' in value: + self.story.setMetadata('status', 'Completed') + else: + self.story.setMetadata('status', 'In-Progress') + + if 'Published' in label: + self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat)) + + if 'Updated' in label: + # there's a stray [ at the end. + #value = value[0:-1] + self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat)) + + try: + # Find Series name from series URL. + a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+")) + series_name = a.string + series_url = 'http://'+self.host+'/'+a['href'] + + # use BeautifulSoup HTML parser to make everything easier to find. + seriessoup = self.make_soup(self._fetchUrl(series_url)) + storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$')) + i=1 + for a in storyas: + if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')): + self.setSeries(series_name, i) + self.story.setMetadata('seriesUrl',series_url) + break + i+=1 + + except: + # I find it hard to care if the series parsing fails + pass + + # grab the text for an individual chapter. + def getChapterText(self, url): + + logger.debug('Getting chapter text from: %s' % url) + + soup = self.make_soup(self._fetchUrl(url)) + + div = soup.find('div', {'id' : 'story'}) + + if None == div: + raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" 
% url) + + return self.utf8FromSoup(url,div) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_fanficauthorsnet.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_fanficauthorsnet.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_fanficauthorsnet.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_fanficauthorsnet.py 2019-04-19 21:08:56.000000000 +0200 @@ -240,15 +240,15 @@ # Status: Completed - Rating: Everyone - Chapters: 1 - Word count: 876 - Genre: Sorrow # Status: In progress - Rating: Mature - Chapters: 39 - Word count: 314,544 - Genre: Drama - Romance div = soup.find('div',{'class':'well'}) - logger.debug(div.find_all('p')[1]) + # logger.debug(div.find_all('p')[1]) metaline = re.sub(r' +',' ',stripHTML(div.find_all('p')[1]).replace('\n',' ')) - logger.debug(metaline) + # logger.debug(metaline) match = re.match(r"Status: (?P<status>.+?) - Rating: (?P<rating>.+?) - Chapters: [0-9,]+ - Word count: (?P<numWords>[0-9,]+?) - Genre: (?P<genre>.+?)$",metaline) if match: - logger.debug(match.group('status')) - logger.debug(match.group('rating')) - logger.debug(match.group('numWords')) - logger.debug(match.group('genre')) + # logger.debug(match.group('status')) + # logger.debug(match.group('rating')) + # logger.debug(match.group('numWords')) + # logger.debug(match.group('genre')) if "Completed" in match.group('status'): self.story.setMetadata('status',"Completed") else: diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_fanfictionnet.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_fanfictionnet.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_fanfictionnet.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_fanfictionnet.py 2019-04-19 21:08:56.000000000 +0200 @@ -110,7 +110,7 @@ else: raise e - if "Unable to locate story" in data: + if "Unable to locate story" in data or "Story Not Found" in data: raise exceptions.StoryDoesNotExist(url) # some times "Chapter not found...", sometimes "Chapter text diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_gluttonyfictioncom.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_gluttonyfictioncom.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_gluttonyfictioncom.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_gluttonyfictioncom.py 2019-04-19 21:08:56.000000000 +0200 @@ -38,7 +38,7 @@ def getDateFormat(self): # The date format will vary from site to site. 
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior - return "%m/%d/%Y" + return "%d/%m/%Y" ################################################################################## ### The Efiction Base Adapter uses the Bulk story to retrieve the metadata, but diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_ncisficcom.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_ncisficcom.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_ncisficcom.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_ncisficcom.py 1970-01-01 01:00:00.000000000 +0100 @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2012 Fanficdownloader team, 2018 FanFicFare team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Software: eFiction -from __future__ import absolute_import -import logging -logger = logging.getLogger(__name__) -import re -from ..htmlcleanup import stripHTML -from .. import exceptions as exceptions - -# py2 vs py3 transition -from ..six import text_type as unicode -from ..six.moves.urllib.error import HTTPError - -from .base_adapter import BaseSiteAdapter, makeDate - -def getClass(): - return NCISFicComAdapter - -# Class name has to be unique. Our convention is camel case the -# sitename with Adapter at the end. www is skipped. -class NCISFicComAdapter(BaseSiteAdapter): - - def __init__(self, config, url): - BaseSiteAdapter.__init__(self, config, url) - - self.username = "NoneGiven" # if left empty, site doesn't return any message at all. - self.password = "" - self.is_adult=False - - # get storyId from url--url validation guarantees query is only storyid=1234 - self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1]) - - - # normalized story URL. - self._setURL('http://' + self.getSiteDomain() + '/viewstory.php?storyid='+self.story.getMetadata('storyId')) - - # Each adapter needs to have a unique site abbreviation. - self.story.setMetadata('siteabbrev','ncisf') - - # The date format will vary from site to site. - # http://docs.python.org/library/datetime.html#strftime-strptime-behavior - self.dateformat = "%m-%d-%y" - - @staticmethod # must be @staticmethod, don't remove it. - def getSiteDomain(): - return 'ncisfic.com' - - @classmethod - def getAcceptDomains(cls): - return ['www.ncisfic.com','ncisfic.com'] - - @classmethod - def getSiteExampleURLs(cls): - return "http://"+cls.getSiteDomain()+"/viewstory.php?storyid=1234" - - def getSiteURLPattern(self): - return re.escape("http://")+"(www\.)?"+re.escape(self.getSiteDomain()+"/viewstory.php?storyid=")+r"\d+$" - - - ## Getting the chapter list and the meta data, plus 'is adult' checking. - def extractChapterUrlsAndMetadata(self): - - # index=1 makes sure we see the story chapter index. Some - # sites skip that for one-chapter stories. 
- url = self.url - logger.debug("URL: "+url) - - try: - data = self._fetchUrl(url) - except HTTPError as e: - if e.code == 404: - raise exceptions.StoryDoesNotExist(self.url) - else: - raise e - - if "Access denied. This story has not been validated by the adminstrators of this site." in data: - raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.") - - # use BeautifulSoup HTML parser to make everything easier to find. - soup = self.make_soup(data) - # print data - - # Now go hunting for all the meta data and the chapter list. - - ## Title - a = soup.find('h1') - self.story.setMetadata('title',stripHTML(a)) - - # Find authorid and URL from... author url. - a = soup.find('a', href=re.compile(r"authorresults.php\?author=\d+")) - self.story.setMetadata('authorId',a['href'].split('=')[1]) - self.story.setMetadata('authorUrl','http://'+self.host+'/'+a['href']) - self.story.setMetadata('author',a.string) - - # Find the chapters: - for p in soup.findAll('p'): - chapters = p.findAll('a', href=re.compile(r'viewstory.php\?storyid='+self.story.getMetadata('storyId')+"&chapnum=\d+$")) - if len(chapters) > 0: - for chapter in chapters: - # just in case there's tags, like <i> in chapter titles. - self.add_chapter(chapter,'http://'+self.host+'/'+chapter['href']) - break - - self.story.setMetadata('status', 'Completed') - - # <span class="label">Rated:</span> NC-17<br /> etc - labels = soup.findAll('b') - for x in range(2,len(labels)): - value = labels[x].nextSibling - label = labels[x].string - - if 'Summary' in label: - self.setDescription(url,value) - #self.story.setMetadata('description',stripHTML(svalue)) - - if 'Rating' in label: - self.story.setMetadata('rating', stripHTML(value.nextSibling)) - - if 'Word Count' in label: - self.story.setMetadata('numWords', value.string) - - if 'Category' in label: - for cat in value.string.split(', '): - self.story.addToList('category',cat) - if 'Crossover Shows' in label: - for cat in value.string.split(', '): - if "No Show" not in cat: - self.story.addToList('category',cat) - - if 'Character' in label: - for char in value.string.split(', '): - self.story.addToList('characters',char) - - if 'Pairing' in label: - for char in value.string.split(', '): - self.story.addToList('ships',char) - - if 'Warnings' in label: - for warning in value.string.split(', '): - self.story.addToList('warnings',warning) - - if 'Published' in label: - self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat)) - - if 'Series' in label: - if "No Series" not in value.nextSibling.string: - self.setSeries(stripHTML(value.nextSibling), value.nextSibling.nextSibling.string[2:]) - self.story.setMetadata('seriesUrl','http://'+self.host+'/'+value.nextSibling['href']) - - asoup = self.make_soup(self._fetchUrl(self.story.getMetadata('authorUrl'))) - story=asoup.find('a', href=re.compile(r'viewstory.php\?storyid='+self.story.getMetadata('storyId'))) - - a=story.findNext('font') - if 'Complete' in a.string: - self.story.setMetadata('status', 'Completed') - else: - self.story.setMetadata('status', 'In-Progress') - - a=story.findNext(text=re.compile('Genre')).parent.nextSibling.string.split(', ') - for genre in a: - self.story.setMetadata('genre', genre) - - a=story.findNext(text=re.compile('Archived')) - self.story.setMetadata('datePublished', makeDate(stripHTML(a.parent.nextSibling), self.dateformat)) - self.story.setMetadata('dateUpdated', makeDate(stripHTML(a.parent.nextSibling), 
self.dateformat)) - - # grab the text for an individual chapter. - def getChapterText(self, url): - - logger.debug('Getting chapter text from: %s' % url) - - soup = self.make_soup(self._fetchUrl(url)) - - div = soup.find('div') - - # bit messy since higly inconsistent - for p in soup.findAll('p', {'align' : 'center'}): - p.extract() - p = soup.findAll('p') - for x in range(0,3): - p[x].extract() - if "Chapters: " in stripHTML(p[3]): - p[3].extract() - for x in range(len(p)-2,len(p)-1): - p[x].extract() - - for p in soup.findAll('h1'): - p.extract() - for p in soup.findAll('h3'): - p.extract() - for p in soup.findAll('a'): - p.extract() - - if None == div: - raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url) - - return self.utf8FromSoup(url,div) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/adapters/adapter_royalroadcom.py new/FanFicFare-3.7.0/fanficfare/adapters/adapter_royalroadcom.py --- old/FanFicFare-3.6.0/fanficfare/adapters/adapter_royalroadcom.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/adapters/adapter_royalroadcom.py 2019-04-19 21:08:56.000000000 +0200 @@ -207,8 +207,10 @@ for label in [stripHTML(a) for a in soup.find_all('span', {'class':'label'})]: if 'COMPLETED' == label: self.story.setMetadata('status', 'Completed') - elif ('ONGOING' == label) or ('HIATUS' == label): + elif 'ONGOING' == label: self.story.setMetadata('status', 'In-Progress') + elif 'HIATUS' == label: + self.story.setMetadata('status', 'Hiatus') elif 'Fan Fiction' == label: self.story.addToList('category', 'FanFiction') elif 'Original' == label: diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/cli.py new/FanFicFare-3.7.0/fanficfare/cli.py --- old/FanFicFare-3.6.0/fanficfare/cli.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/cli.py 2019-04-19 21:08:56.000000000 +0200 @@ -39,7 +39,7 @@ def pickle_load(f): return pickle.load(f,encoding="bytes") -version="3.6.0" +version="3.7.0" os.environ['CURRENT_VERSION_ID']=version global_cache = 'global_cache' diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/configurable.py new/FanFicFare-3.7.0/fanficfare/configurable.py --- old/FanFicFare-3.6.0/fanficfare/configurable.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/configurable.py 2019-04-19 21:08:56.000000000 +0200 @@ -227,6 +227,7 @@ 'always_login':(['archiveofourown.org']+base_xenforo_list,None,boollist), 'use_archived_author':(['archiveofourown.org'],None,boollist), 'use_view_full_work':(['archiveofourown.org'],None,boollist), + 'remove_authorfootnotes_on_update':(['archiveofourown.org'],None,boollist), 'force_login':(['phoenixsong.net'],None,boollist), 'non_breaking_spaces':(['fictionmania.tv'],None,boollist), @@ -359,6 +360,7 @@ 'use_view_full_work', 'always_login', 'exclude_notes', + 'remove_authorfootnotes_on_update', 'exclude_editor_signature', 'extra_logpage_entries', 'extra_subject_tags', @@ -978,7 +980,7 @@ "iso-8859-1"]) for code in decode: try: - #print(code) + logger.debug("Encoding:%s"%code) errors=None if ':' in code: (code,errors)=code.split(':') diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/defaults.ini new/FanFicFare-3.7.0/fanficfare/defaults.ini --- 
old/FanFicFare-3.6.0/fanficfare/defaults.ini 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/defaults.ini 2019-04-19 21:08:56.000000000 +0200 @@ -248,7 +248,7 @@ ## calibre_author: calibre_author_LIST=>^(.{,100}).*$=>\1 ## ## You can 'split' one list item into multiple list entries by using -## \' in the replacement string. +## \, in the replacement string. ## ## Examples: #replace_metadata: @@ -1154,7 +1154,7 @@ ## entry) as the composite offreeformtags, ao3categories in ## include_in_genre. If there's ever more than 4 series, add ## series04,series04Url etc. -extra_valid_entries:fandoms,freeformtags,freefromtags,ao3categories,comments,kudos,hits,bookmarks,collections,byline,bookmarktags,bookmarksummary,bookmarkprivate,bookmarkrec,series00,series01,series02,series03,series00Url,series01Url,series02Url,series03Url,series00HTML,series01HTML,series02HTML,series03HTML +extra_valid_entries:fandoms,freeformtags,freefromtags,ao3categories,comments,kudos,hits,bookmarks,collections,byline,bookmarked,bookmarktags,bookmarksummary,bookmarkprivate,bookmarkrec,series00,series01,series02,series03,series00Url,series01Url,series02Url,series03Url,series00HTML,series01HTML,series02HTML,series03HTML fandoms_label:Fandoms freeformtags_label:Freeform Tags freefromtags_label:Freeform Tags @@ -1167,6 +1167,7 @@ bookmarks_label:Bookmarks ## Tags & Summary from *your* bookmark on the story. Only collected ## if always_login:true +bookmarked_label:I Bookmarked Story bookmarktags_label:My Bookmark Tags bookmarksummary_label:My Bookmark Summary bookmarkprivate_label:My Bookmark Private @@ -1197,7 +1198,7 @@ include_in_freefromtags:freeformtags ## adds to titlepage_entries instead of replacing it. -#extra_titlepage_entries: fandoms,freeformtags,ao3categories,comments,kudos,hits,bookmarks,bookmarktags,bookmarksummary,series01HTML,series02HTML,series03HTML,byline +#extra_titlepage_entries: fandoms,freeformtags,ao3categories,comments,kudos,hits,bookmarks,bookmarked,bookmarktags,bookmarksummary,series01HTML,series02HTML,series03HTML,byline ## adds to include_subject_tags instead of replacing it. #extra_subject_tags:fandoms,freeformtags,ao3categories @@ -1208,6 +1209,17 @@ ## personal.ini and list the ones you don't want. #exclude_notes:authorheadnotes,chaptersummary,chapterheadnotes,chapterfootnotes,authorfootnotes,inspiredlinks +## AO3 authorfootnotes and inspiredlinks end up in the 'last' chapter, +## but if updated, then there's a new 'last' chapter, leaving multiple +## copies. This option applies the 'skip_on_ffdl_update' class to +## those tags which means they will be removed during epub reading for +## update. This will only effect chapters added after turning the +## setting on. +## Result: Only the last chapter will have end notes. +## Side-effect: An 'Update Always' that doesn't add a new last +## chapter will also remove the end notes. +#remove_authorfootnotes_on_update:false + ## AO3 is blocking people more aggressively. If you download fewer ## stories less often you can likely get by with reducing this sleep. slow_down_sleep_time:2 @@ -1776,12 +1788,6 @@ ## Site dedicated to these categories/characters/ships extracategories:The Office -[ncisfic.com] -## Site dedicated to these categories/characters/ships -extracategories:NCIS - -website_encodings:Windows-1252,utf8 - [nfacommunity.com] ## Some sites do not require a login, but do require the user to ## confirm they are adult for adult content. 
In commandline version, @@ -2207,6 +2213,14 @@ ## Some sites do not require a login, but do require the user to ## confirm they are adult for adult content. In commandline version, ## this should go in your personal.ini, not defaults.ini. +#is_adult:true + +website_encodings:Windows-1252,utf8 + +[www.destinysgateway.com] +## Some sites do not require a login, but do require the user to +## confirm they are adult for adult content. In commandline version, +## this should go in your personal.ini, not defaults.ini. #is_adult:true website_encodings:Windows-1252,utf8 diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/geturls.py new/FanFicFare-3.7.0/fanficfare/geturls.py --- old/FanFicFare-3.6.0/fanficfare/geturls.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/geturls.py 2019-04-19 21:08:56.000000000 +0200 @@ -192,7 +192,7 @@ ## only sent for thread updates, I believe. Email only so ## get_urls_from_page can still get post URLs. href = re.sub(r"/(unread|page-\d+)?(#post-\d+)?",r"/",href) - elif 'clicktracker.royalroad' in href: + elif 'click' in href and 'royalroad' in href: # they've changed the domain at least once logger.debug(href) from .six.moves.urllib.request import build_opener opener = build_opener() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/fanficfare/story.py new/FanFicFare-3.7.0/fanficfare/story.py --- old/FanFicFare-3.6.0/fanficfare/story.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/fanficfare/story.py 2019-04-19 21:08:56.000000000 +0200 @@ -502,9 +502,13 @@ def setMetadata(self, key, value, condremoveentities=True): - # delete + # delete cached replace'd value. if key in self.processed_metadata_cache: del self.processed_metadata_cache[key] + # Fixing everything downstream to handle bool primatives is a + # pain. + if isinstance(value,bool): + value = unicode(value) # keep as list type, but set as only value. if self.isList(key): self.addToList(key,value,condremoveentities=condremoveentities,clear=True) @@ -587,7 +591,9 @@ retlist = [value] for replaceline in self.replacements: (repl_line,metakeys,regexp,replacement,cond_match) = replaceline - #print("replacement tuple:%s"%replaceline) + # logger.debug("replacement tuple:%s"%replaceline) + # logger.debug("key:%s value:%s"%(key,value)) + # logger.debug("value class:%s"%value.__class__.__name__) if (metakeys == None or key in metakeys) \ and isinstance(value,basestring) \ and regexp.search(value): @@ -649,6 +655,7 @@ def dump_html_metadata(self): lines=[] for k,v in sorted(six.iteritems(self.metadata)): + #logger.debug("k:%s v:%s"%(k,v)) classes=['metadata'] if isinstance(v, (datetime.date, datetime.datetime, datetime.time)): classes.append("datetime") @@ -698,12 +705,17 @@ for i in tag.find_all('li'): val.append(i.string) elif 'int' in tag['class']: - val = int(tag.string) + # Python reports true when asked isinstance(<bool>, (int)) + # bools now converted to unicode when set. 
+ if tag.string in ('True','False'): + val = tag.string + else: + val = int(tag.string) else: val = unicode("\n".join([ unicode(c) for c in tag.contents ])) #logger.debug("key(%s)=val(%s)"%(tag['id'],val)) - if val: + if val != None: self.metadata[tag['id']]=val # self.metadata = json.loads(s, object_hook=datetime_decoder) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/setup.py new/FanFicFare-3.7.0/setup.py --- old/FanFicFare-3.6.0/setup.py 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/setup.py 2019-04-19 21:08:56.000000000 +0200 @@ -3,7 +3,7 @@ """A setuptools based setup module. See: -https://packaging.python.org/en/latest/distributing.html +https://packaging.python.org/guides/distributing-packages-using-setuptools/ https://github.com/pypa/sampleproject """ @@ -27,7 +27,7 @@ name=package_name, # Versions should comply with PEP440. - version="3.6.0", + version="3.7.0", description='A tool for downloading fanfiction to eBook formats', long_description=long_description, diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/webservice/app.yaml new/FanFicFare-3.7.0/webservice/app.yaml --- old/FanFicFare-3.6.0/webservice/app.yaml 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/webservice/app.yaml 2019-04-19 21:08:56.000000000 +0200 @@ -1,6 +1,6 @@ # ffd-retief-hrd fanficfare application: fanficfare -version: 3-6-0 +version: 3-7-0 runtime: python27 api_version: 1 threadsafe: true diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/FanFicFare-3.6.0/webservice/index.html new/FanFicFare-3.7.0/webservice/index.html --- old/FanFicFare-3.6.0/webservice/index.html 2019-03-12 16:28:29.000000000 +0100 +++ new/FanFicFare-3.7.0/webservice/index.html 2019-04-19 21:08:56.000000000 +0200 @@ -84,7 +84,7 @@ If you have any problems with this application, please report them in the <a href="https://groups.google.com/group/fanfic-downloader">FanFicFare Google Group</a>. The - <a href="https://3-5-0.fanficfare.appspot.com">previous version</a> + <a href="https://3-6-0.fanficfare.appspot.com">previous version</a> is also available for you to use if necessary. </p> <div id='error'>
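For readers skimming the remove_authorfootnotes_on_update change documented in the defaults.ini hunks above: the AO3 adapter marks the end-note tags with the 'skip_on_ffdl_update' class so they are dropped when the existing epub is re-read for an update. A self-contained sketch of that marking step with BeautifulSoup -- the HTML snippet and tag selection here are invented for illustration; only the class string comes from the adapter hunk:

    # Illustration only, not the adapter's code. Adds 'skip_on_ffdl_update'
    # to each note tag, preserving any classes already present.
    from bs4 import BeautifulSoup

    html = "<div class='notes'><b>Author's Note:</b><blockquote>end notes...</blockquote></div>"
    soup = BeautifulSoup(html, 'html.parser')

    for tag in soup.find_all(['b', 'blockquote']):
        if tag.has_attr('class'):
            tag['class'].append('skip_on_ffdl_update')
        else:
            tag['class'] = ['skip_on_ffdl_update']

    print(soup)  # both tags now carry class="skip_on_ffdl_update"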