From f955f66789b0078dcb973ce587d2d3b3184e73a7 Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Thu, 24 Jan 2019 15:21:43 -0800
Subject: allow importing contrib/refs lists

The motivation here isn't really to support these gigantic lists on
principle, but to be able to ingest large corpuses without having to
decide whether to filter out or crop such lists.
---
 python/fatcat_tools/importers/crossref.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

(limited to 'python/fatcat_tools')

diff --git a/python/fatcat_tools/importers/crossref.py b/python/fatcat_tools/importers/crossref.py
index 00c719f1..4a0322e7 100644
--- a/python/fatcat_tools/importers/crossref.py
+++ b/python/fatcat_tools/importers/crossref.py
@@ -303,9 +303,12 @@ class CrossrefImporter(EntityImporter):
         # external identifiers
         extids = self.lookup_ext_ids(doi=obj['DOI'].lower())
 
-        # TODO: filter out huge releases; we'll get them later (and fix bug in
-        # fatcatd)
-        if max(len(contribs), len(refs), len(abstracts)) > 750:
+        # filter out unreasonably huge releases
+        if len(abstracts) > 100:
+            return None
+        if len(contribs) > 2000:
+            return None
+        if len(refs) > 5000:
             return None
 
         # release date parsing is amazingly complex
@@ -322,11 +325,16 @@ class CrossrefImporter(EntityImporter):
                 release_year = raw_date[0]
         release_date = None
 
+        original_title = None
+        if obj.get('original-title'):
+            original_title = clean(obj.get('original-title')[0], force_xml=True)
+        if obj.get('title'):
+            title = clean(obj.get('title')[0], force_xml=True)
         re = fatcat_client.ReleaseEntity(
             work_id=None,
             container_id=container_id,
-            title=clean(obj.get('title', [None])[0], force_xml=True),
-            original_title=clean(obj.get('original-title', [None])[0]),
+            title=title,
+            original_title=original_title,
             release_type=release_type,
             release_status=release_status,
             release_date=release_date,
--
cgit v1.2.3
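
For anyone sanity-checking the new thresholds, here is a minimal standalone
sketch of the filter logic, runnable outside the importer. The function name,
constants, and argument shapes are hypothetical (not part of fatcat_tools);
only the numeric limits and the skip behavior mirror this commit.

    MAX_ABSTRACTS = 100
    MAX_CONTRIBS = 2000
    MAX_REFS = 5000

    def within_size_limits(abstracts, contribs, refs):
        # The importer returns None to skip unreasonably huge releases;
        # this sketch returns False in the same cases.
        if len(abstracts) > MAX_ABSTRACTS:
            return False
        if len(contribs) > MAX_CONTRIBS:
            return False
        if len(refs) > MAX_REFS:
            return False
        return True

    # A release with 3,000 references now imports, where the old blanket
    # cap of 750 on any one of the three lists would have skipped it.
    assert within_size_limits([], [], [None] * 3000)
    assert not within_size_limits([], [], [None] * 6000)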
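
The guarded title cleaning in the second hunk can likewise be exercised in
isolation. The clean() stub below is an assumed stand-in for the importer's
real sanitizer, included only to make the snippet self-contained. Note that
title is bound only when a 'title' list is present, so the importer relies
on earlier validation of that field.

    def clean(raw, force_xml=False):
        # Assumed stand-in for the fatcat_tools sanitizer; the real one
        # is presumed to strip bad characters and, with force_xml=True,
        # handle XML entities.
        return raw.strip() if raw else None

    obj = {'title': ['Some Article'], 'original-title': ['Ein Artikel']}

    original_title = None
    if obj.get('original-title'):
        original_title = clean(obj.get('original-title')[0], force_xml=True)
    if obj.get('title'):
        title = clean(obj.get('title')[0], force_xml=True)

    # If obj lacked a 'title' key entirely, `title` would be unbound at the
    # later ReleaseEntity(title=title, ...) call and raise NameError.
    print(title, '/', original_title)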