author     Bryan Newbold <bnewbold@robocracy.org>  2021-11-02 11:32:16 -0700
committer  Bryan Newbold <bnewbold@robocracy.org>  2021-11-02 11:32:16 -0700
commit     2137be14556a604a06e8b54bbc22d68f7d3cd695 (patch)
tree       30a23b96afac28d0af57877b0031f10d8569373b /python/fatcat_tools/importers
parent     fdbfb8dc55df8c3739feca8c52c017c56b006573 (diff)
parent     6184ecca3a2e072c11482020938566dc8841bf52 (diff)
Merge branch 'bnewbold-import-fileset'
Diffstat (limited to 'python/fatcat_tools/importers')
-rw-r--r--  python/fatcat_tools/importers/__init__.py          3
-rw-r--r--  python/fatcat_tools/importers/common.py            4
-rw-r--r--  python/fatcat_tools/importers/fileset_generic.py   83
-rw-r--r--  python/fatcat_tools/importers/ingest.py           248
4 files changed, 335 insertions, 3 deletions
diff --git a/python/fatcat_tools/importers/__init__.py b/python/fatcat_tools/importers/__init__.py
index 9cb18506..a2224081 100644
--- a/python/fatcat_tools/importers/__init__.py
+++ b/python/fatcat_tools/importers/__init__.py
@@ -27,9 +27,10 @@ from .orcid import OrcidImporter
from .arabesque import ArabesqueMatchImporter, ARABESQUE_MATCH_WHERE_CLAUSE
from .wayback_static import auto_wayback_static
from .cdl_dash_dat import auto_cdl_dash_dat
-from .ingest import IngestFileResultImporter, SavePaperNowFileImporter, IngestWebResultImporter, SavePaperNowWebImporter
+from .ingest import IngestFileResultImporter, SavePaperNowFileImporter, IngestWebResultImporter, SavePaperNowWebImporter, IngestFilesetResultImporter, SavePaperNowFilesetImporter
from .shadow import ShadowLibraryImporter
from .file_meta import FileMetaImporter
from .doaj_article import DoajArticleImporter
from .dblp_release import DblpReleaseImporter
from .dblp_container import DblpContainerImporter
+from .fileset_generic import FilesetImporter
diff --git a/python/fatcat_tools/importers/common.py b/python/fatcat_tools/importers/common.py
index e936477c..680b4f9c 100644
--- a/python/fatcat_tools/importers/common.py
+++ b/python/fatcat_tools/importers/common.py
@@ -447,6 +447,10 @@ class EntityImporter:
existing.urls = [u for u in existing.urls if u.url not in redundant_urls]
return existing
+ @staticmethod
+ def generic_fileset_cleanups(existing):
+ return existing
+
def match_existing_release_fuzzy(self, release: ReleaseEntity) -> Optional[Tuple[str, str, ReleaseEntity]]:
"""
This helper function uses fuzzycat (and elasticsearch) to look for
diff --git a/python/fatcat_tools/importers/fileset_generic.py b/python/fatcat_tools/importers/fileset_generic.py
new file mode 100644
index 00000000..f0ad5460
--- /dev/null
+++ b/python/fatcat_tools/importers/fileset_generic.py
@@ -0,0 +1,83 @@
+
+import fatcat_openapi_client
+
+from fatcat_tools import entity_from_dict
+from .common import EntityImporter
+
+
+class FilesetImporter(EntityImporter):
+ """
+    General purpose importer for fileset entities. Simply parses fileset
+    schema JSON and inserts the entities.
+
+ By default requires release_ids to be non-empty, and will check each
+ release_id to see if a fileset is already associated; if so, skips the
+ import. This behavior may change in the future, and can be disabled.
+
+ Currently only creates (insert), no updates.
+ """
+
+ def __init__(self, api, **kwargs):
+
+ eg_desc = kwargs.pop('editgroup_description', None) or "Generic Fileset entity import"
+ eg_extra = kwargs.pop('editgroup_extra', dict())
+ eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.FilesetImporter')
+ kwargs['do_updates'] = bool(kwargs.get("do_updates", False))
+ self.skip_release_fileset_check = bool(kwargs.get("skip_release_fileset_check", False))
+ super().__init__(api,
+ editgroup_description=eg_desc,
+ editgroup_extra=eg_extra,
+ **kwargs)
+
+ # bezerk mode doesn't make sense for this importer
+        assert self.bezerk_mode is False
+
+ def want(self, row):
+ if not row.get('release_ids'):
+ self.counts['skip-no-release-ids'] += 1
+ return False
+ if not row.get('urls'):
+ self.counts['skip-no-urls'] += 1
+ return False
+ if not row.get('manifest'):
+ self.counts['skip-no-files'] += 1
+ return False
+
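+        # every file in the manifest must carry the hashes used for
+        # deduplication and verification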
+ for f in row.get('manifest'):
+ for k in ('sha1', 'md5'):
+ if not f.get(k):
+ self.counts['skip-missing-file-field'] += 1
+ return False
+ return True
+
+ def parse_record(self, row):
+
+ fse = entity_from_dict(
+ row,
+ fatcat_openapi_client.FilesetEntity,
+ api_client=self.api.api_client,
+ )
+ fse = self.generic_fileset_cleanups(fse)
+ return fse
+
+ def try_update(self, fse):
+
+ if not self.skip_release_fileset_check:
+ for release_id in fse.release_ids:
+ # don't catch 404, that would be an error
+ release = self.api.get_release(release_id, expand='filesets', hide='abstracts,refs')
+ assert release.state == 'active'
+ if release.filesets:
+ self.counts['exists'] += 1
+ self.counts['exists-via-release-filesets'] += 1
+ return False
+
+ # do the insert
+ return True
+
+ def insert_batch(self, batch):
+ self.api.create_fileset_auto_batch(fatcat_openapi_client.FilesetAutoBatch(
+ editgroup=fatcat_openapi_client.Editgroup(
+ description=self.editgroup_description,
+ extra=self.editgroup_extra),
+ entity_list=batch))
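
For context, a minimal sketch of how the new FilesetImporter might be driven from a script, assuming an authenticated API client and a hypothetical input file with one fileset entity (as schema JSON) per line; this mirrors the usual JsonLinePusher pattern rather than an invocation documented in this commit:

    from fatcat_tools import authenticated_api
    from fatcat_tools.importers import FilesetImporter, JsonLinePusher

    # authenticated_api() is the usual helper; it reads an auth token from
    # the environment if one is not passed explicitly
    api = authenticated_api("https://api.fatcat.wiki/v0")

    # filesets.json is a hypothetical path; one JSON entity per line
    with open("filesets.json") as f:
        counts = JsonLinePusher(FilesetImporter(api), f).run()
    # counts summarizes inserted vs. skipped records
    print(counts)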
diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py
index bc759219..288c4cff 100644
--- a/python/fatcat_tools/importers/ingest.py
+++ b/python/fatcat_tools/importers/ingest.py
@@ -64,6 +64,9 @@ class IngestFileResultImporter(EntityImporter):
"application/jats+xml", "application/tei+xml", "text/xml"):
self.counts['skip-mimetype'] += 1
return False
+ elif row['request'].get('ingest_type') in ['component', 'src', 'dataset-file']:
+ # we rely on sandcrawler for these checks
+ pass
else:
self.counts['skip-ingest-type'] += 1
return False
@@ -336,7 +339,7 @@ class SavePaperNowFileImporter(IngestFileResultImporter):
eg_desc = kwargs.pop('editgroup_description', None) or "Files crawled after a public 'Save Paper Now' request"
eg_extra = kwargs.pop('editgroup_extra', dict())
- eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFileSavePaperNow')
+ eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowFileImporter')
kwargs['submit_mode'] = submit_mode
kwargs['require_grobid'] = False
kwargs['do_updates'] = False
@@ -533,7 +536,7 @@ class SavePaperNowWebImporter(IngestWebResultImporter):
eg_desc = kwargs.pop('editgroup_description', None) or "Webcaptures crawled after a public 'Save Paper Now' request"
eg_extra = kwargs.pop('editgroup_extra', dict())
- eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestWebSavePaperNow')
+ eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowWebImporter')
kwargs['submit_mode'] = submit_mode
kwargs['do_updates'] = False
super().__init__(api,
@@ -573,3 +576,244 @@ class SavePaperNowWebImporter(IngestWebResultImporter):
return False
return True
+
+
+class IngestFilesetResultImporter(IngestFileResultImporter):
+ """
+ Variant of IngestFileResultImporter for processing, eg, dataset ingest
+ results into fileset objects.
+ """
+
+ def __init__(self, api, **kwargs):
+
+ eg_desc = kwargs.pop('editgroup_description', None) or "Filesets crawled from web using sandcrawler ingest tool"
+ eg_extra = kwargs.pop('editgroup_extra', dict())
+ eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFilesetResultImporter')
+ kwargs['do_updates'] = False
+ super().__init__(api,
+ editgroup_description=eg_desc,
+ editgroup_extra=eg_extra,
+ **kwargs)
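+        # arbitrary sanity cap on manifest size; larger filesets are skipped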
+ self.max_file_count = 300
+
+ def want_fileset(self, row):
+
+        manifest = row.get('manifest') or []
+        if not manifest:
+            self.counts['skip-empty-manifest'] += 1
+            return False
+
+        if len(manifest) == 1:
+            self.counts['skip-single-file'] += 1
+            return False
+
+        if len(manifest) > self.max_file_count:
+            self.counts['skip-too-many-files'] += 1
+            return False
+
+ return True
+
+ def want(self, row):
+
+ if not self.want_ingest(row):
+ return False
+
+ # fileset-specific filters
+ if row['request'].get('ingest_type') not in ['dataset',]:
+ self.counts['skip-ingest-type'] += 1
+ return False
+
+ if not self.want_fileset(row):
+ return False
+
+ return True
+
+ def parse_fileset_urls(self, row):
+ if not row.get('strategy'):
+ return []
+ strategy = row['strategy']
+ urls = []
+ if strategy == 'archiveorg-fileset' and row.get('archiveorg_item_name'):
+ urls.append(fatcat_openapi_client.FilesetUrl(
+ url=f"https://archive.org/download/{row['archiveorg_item_name']}/",
+ rel="archive-base",
+ ))
+        if strategy.startswith('web-') and row.get('web_base_url'):
+ urls.append(fatcat_openapi_client.FilesetUrl(
+ url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}",
+ rel="webarchive-base",
+ ))
+ # TODO: repository-base
+ # TODO: web-base
+
+        if strategy == 'archiveorg-fileset-bundle' and row.get('archiveorg_item_name') and row.get('archiveorg_bundle_path'):
+            # item-relative path of the bundle file; field name assumed from
+            # sandcrawler fileset ingest output
+            urls.append(fatcat_openapi_client.FilesetUrl(
+                url=f"https://archive.org/download/{row['archiveorg_item_name']}/{row['archiveorg_bundle_path']}",
+                rel="archive-bundle",
+            ))
+
+        if strategy == 'web-fileset-bundle' and row.get('web_bundle_url'):
+ urls.append(fatcat_openapi_client.FilesetUrl(
+ url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}",
+ rel="webarchive-bundle",
+ ))
+
+ # add any additional / platform URLs here
+ if row.get('platform_bundle_url'):
+ urls.append(fatcat_openapi_client.FilesetUrl(
+ url=row['platform_bundle_url'],
+ rel="repository-bundle",
+ ))
+ if row.get('platform_base_url'):
+ urls.append(fatcat_openapi_client.FilesetUrl(
+                url=row['platform_base_url'],
+ rel="repository-base",
+ ))
+ return urls
+
+ def parse_record(self, row):
+
+ request = row['request']
+
+ # double check that want() filtered request correctly
+ if request.get('ingest_type') not in ["dataset",]:
+ self.counts['skip-ingest-type'] += 1
+ return None
+
+ # identify release by fatcat ident, or extid lookup
+ release_ident = self.parse_ingest_release_ident(row)
+
+ if not release_ident:
+ self.counts['skip-release-not-found'] += 1
+ return None
+
+ entity_extra = dict()
+ edit_extra = self.parse_edit_extra(row)
+ edit_extra['ingest_strategy'] = row['ingest_strategy']
+ if row.get('platform'):
+ edit_extra['platform'] = row['platform']
+ if row.get('platform_id'):
+ edit_extra['platform_id'] = row['platform_id']
+
+ entity_urls = self.parse_fileset_urls(row)
+ if not entity_urls:
+ self.counts['skip-no-access-url'] += 1
+ return None
+
+ assert row['file_count'] == len(row['manifest'])
+ if row['file_count'] > self.max_file_count:
+ self.counts['skip-too-many-manifest-files'] += 1
+ return None
+
+ manifest = []
+ for ingest_file in row['manifest']:
+ fsf = fatcat_openapi_client.FilesetFile(
+ path=ingest_file['path'],
+ size=ingest_file['size'],
+ md5=ingest_file['md5'],
+ sha1=ingest_file['sha1'],
+ sha256=ingest_file.get('sha256'),
+ extra=dict(
+ mimetype=ingest_file['mimetype'],
+ ),
+ )
+ if not (fsf.md5 and fsf.sha1 and fsf.path and fsf.size):
+ self.counts['skip-partial-file-info'] += 1
+ return None
+ if ingest_file.get('platform_url'):
+ # XXX: should we include this?
+ fsf.extra['original_url'] = ingest_file['platform_url']
+ if ingest_file.get('terminal_url') and ingest_file.get('terminal_dt'):
+ fsf.extra['wayback_url'] = f"https://web.archive.org/web/{ingest_file['terminal_dt']}/{ingest_file['terminal_url']}"
+ manifest.append(fsf)
+
+ fe = fatcat_openapi_client.FilesetEntity(
+ manifest=manifest,
+ urls=entity_urls,
+ release_ids=[release_ident],
+ )
+
+ if entity_extra:
+ fe.extra = entity_extra
+ if edit_extra:
+ fe.edit_extra = edit_extra
+ return fe
+
+    def try_update(self, fse):
+
+        # check for existing edits-in-progress which would attach a fileset
+        # to one of the same releases (this importer only allows one fileset
+        # per release, so overlapping release_ids count as a duplicate)
+        for other in self._entity_queue:
+            if set(other.release_ids) & set(fse.release_ids):
+                self.counts['skip-in-queue'] += 1
+                return False
+
+        # lookup sha1, or create new entity (TODO: API doesn't support this yet)
+        #existing = None
+
+        # NOTE: in lieu of existing checks (by lookup), only allow one fileset per release
+        release = self.api.get_release(fse.release_ids[0], expand="filesets")
+        if release.filesets:
+            # check if this is an exact existing match (same file SHA-1s in
+            # the expanded manifests), or just a similar hit
+            new_sha1s = set(f.sha1 for f in fse.manifest)
+            for other in release.filesets:
+                if new_sha1s == set(f.sha1 for f in other.manifest):
+                    # TODO: compare very similar timestamps of same time (different formats)
+                    self.counts['exists'] += 1
+                    return False
+            self.counts['skip-release-has-fileset'] += 1
+            return False
+
+        return True
+
+ def insert_batch(self, batch):
+ if self.submit_mode:
+ eg = self.api.create_editgroup(fatcat_openapi_client.Editgroup(
+ description=self.editgroup_description,
+ extra=self.editgroup_extra))
+ for fe in batch:
+ self.api.create_fileset(eg.editgroup_id, fe)
+ self.api.update_editgroup(eg.editgroup_id, eg, submit=True)
+ else:
+ self.api.create_fileset_auto_batch(fatcat_openapi_client.FilesetAutoBatch(
+ editgroup=fatcat_openapi_client.Editgroup(
+ description=self.editgroup_description,
+ extra=self.editgroup_extra),
+ entity_list=batch))
+
+
+class SavePaperNowFilesetImporter(IngestFilesetResultImporter):
+ """
+ Like SavePaperNowFileImporter, but for fileset/dataset ingest.
+ """
+
+ def __init__(self, api, submit_mode=True, **kwargs):
+
+ eg_desc = kwargs.pop('editgroup_description', None) or "Fileset crawled after a public 'Save Paper Now' request"
+ eg_extra = kwargs.pop('editgroup_extra', dict())
+ eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowFilesetImporter')
+ kwargs['submit_mode'] = submit_mode
+ kwargs['do_updates'] = False
+ super().__init__(api,
+ editgroup_description=eg_desc,
+ editgroup_extra=eg_extra,
+ **kwargs)
+
+ def want(self, row):
+
+ source = row['request'].get('ingest_request_source')
+ if not source:
+ self.counts['skip-ingest_request_source'] += 1
+ return False
+ if not source.startswith('savepapernow'):
+ self.counts['skip-not-savepapernow'] += 1
+ return False
+
+        if row.get('hit') is not True:
+ self.counts['skip-hit'] += 1
+ return False
+
+ if not self.want_fileset(row):
+ return False
+
+ return True
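
A similar sketch for the new IngestFilesetResultImporter, again assuming a hypothetical line-delimited dump of sandcrawler fileset ingest results (in production these records would normally arrive via Kafka rather than a file):

    from fatcat_tools import authenticated_api
    from fatcat_tools.importers import IngestFilesetResultImporter, JsonLinePusher

    api = authenticated_api("https://api.fatcat.wiki/v0")

    # ingest_fileset_results.json is a hypothetical path; each line is one
    # sandcrawler ingest result with 'request', 'strategy', 'manifest', etc.
    with open("ingest_fileset_results.json") as f:
        counts = JsonLinePusher(IngestFilesetResultImporter(api), f).run()
    print(counts)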