From 70bdcb1ba7ce4aeb423fd6c5ff0ac002302fa1e9 Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Wed, 8 Sep 2021 17:49:57 -0700 Subject: generic fileset importer class, with test coverage --- python/fatcat_tools/importers/__init__.py | 1 + python/fatcat_tools/importers/common.py | 4 ++ python/fatcat_tools/importers/fileset_generic.py | 83 ++++++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 python/fatcat_tools/importers/fileset_generic.py (limited to 'python/fatcat_tools') diff --git a/python/fatcat_tools/importers/__init__.py b/python/fatcat_tools/importers/__init__.py index 9cb18506..5da669e1 100644 --- a/python/fatcat_tools/importers/__init__.py +++ b/python/fatcat_tools/importers/__init__.py @@ -33,3 +33,4 @@ from .file_meta import FileMetaImporter from .doaj_article import DoajArticleImporter from .dblp_release import DblpReleaseImporter from .dblp_container import DblpContainerImporter +from .fileset_generic import FilesetImporter diff --git a/python/fatcat_tools/importers/common.py b/python/fatcat_tools/importers/common.py index e936477c..680b4f9c 100644 --- a/python/fatcat_tools/importers/common.py +++ b/python/fatcat_tools/importers/common.py @@ -447,6 +447,10 @@ class EntityImporter: existing.urls = [u for u in existing.urls if u.url not in redundant_urls] return existing + @staticmethod + def generic_fileset_cleanups(existing): + return existing + def match_existing_release_fuzzy(self, release: ReleaseEntity) -> Optional[Tuple[str, str, ReleaseEntity]]: """ This helper function uses fuzzycat (and elasticsearch) to look for diff --git a/python/fatcat_tools/importers/fileset_generic.py b/python/fatcat_tools/importers/fileset_generic.py new file mode 100644 index 00000000..f0ad5460 --- /dev/null +++ b/python/fatcat_tools/importers/fileset_generic.py @@ -0,0 +1,83 @@ + +import fatcat_openapi_client + +from fatcat_tools import entity_from_dict +from .common import EntityImporter + + +class FilesetImporter(EntityImporter): + """ + General purpose importer for fileset entities. Simply fileset schema JSON + and inserts. + + By default requires release_ids to be non-empty, and will check each + release_id to see if a fileset is already associated; if so, skips the + import. This behavior may change in the future, and can be disabled. + + Currently only creates (insert), no updates. 
+ """ + + def __init__(self, api, **kwargs): + + eg_desc = kwargs.pop('editgroup_description', None) or "Generic Fileset entity import" + eg_extra = kwargs.pop('editgroup_extra', dict()) + eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.FilesetImporter') + kwargs['do_updates'] = bool(kwargs.get("do_updates", False)) + self.skip_release_fileset_check = bool(kwargs.get("skip_release_fileset_check", False)) + super().__init__(api, + editgroup_description=eg_desc, + editgroup_extra=eg_extra, + **kwargs) + + # bezerk mode doesn't make sense for this importer + assert self.bezerk_mode == False + + def want(self, row): + if not row.get('release_ids'): + self.counts['skip-no-release-ids'] += 1 + return False + if not row.get('urls'): + self.counts['skip-no-urls'] += 1 + return False + if not row.get('manifest'): + self.counts['skip-no-files'] += 1 + return False + + for f in row.get('manifest'): + for k in ('sha1', 'md5'): + if not f.get(k): + self.counts['skip-missing-file-field'] += 1 + return False + return True + + def parse_record(self, row): + + fse = entity_from_dict( + row, + fatcat_openapi_client.FilesetEntity, + api_client=self.api.api_client, + ) + fse = self.generic_fileset_cleanups(fse) + return fse + + def try_update(self, fse): + + if not self.skip_release_fileset_check: + for release_id in fse.release_ids: + # don't catch 404, that would be an error + release = self.api.get_release(release_id, expand='filesets', hide='abstracts,refs') + assert release.state == 'active' + if release.filesets: + self.counts['exists'] += 1 + self.counts['exists-via-release-filesets'] += 1 + return False + + # do the insert + return True + + def insert_batch(self, batch): + self.api.create_fileset_auto_batch(fatcat_openapi_client.FilesetAutoBatch( + editgroup=fatcat_openapi_client.Editgroup( + description=self.editgroup_description, + extra=self.editgroup_extra), + entity_list=batch)) -- cgit v1.2.3 From c0c9d4da83b027b081eab364bfc7b807dbe9a2e5 Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Mon, 11 Oct 2021 16:50:29 -0700 Subject: ingest: handle datasets, components, other ingest types --- python/fatcat_tools/transforms/ingest.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'python/fatcat_tools') diff --git a/python/fatcat_tools/transforms/ingest.py b/python/fatcat_tools/transforms/ingest.py index 42927b2a..9101a4ec 100644 --- a/python/fatcat_tools/transforms/ingest.py +++ b/python/fatcat_tools/transforms/ingest.py @@ -32,8 +32,22 @@ def release_ingest_request(release, ingest_request_source='fatcat', ingest_type= if (not ingest_type) and release.container_id: ingest_type = INGEST_TYPE_CONTAINER_MAP.get(release.container_id) + if not ingest_type: - ingest_type = 'pdf' + if release.release_type == 'stub': + return None + elif release.release_type in ['component', 'graphic']: + ingest_type = 'component' + elif release.release_type == 'dataset': + ingest_type = 'dataset' + elif release.release_type == 'software': + ingest_type = 'software' + elif release.release_type == 'post-weblog': + ingest_type = 'html' + elif release.release_type in ['article-journal', 'article', 'chapter', 'paper-conference', 'book', 'report', 'thesis']: + ingest_type = 'pdf' + else: + ingest_type = 'pdf' # generate a URL where we expect to find fulltext url = None -- cgit v1.2.3 From 75baf7d423a2cb119bd485672a00fd664e32537c Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Mon, 11 Oct 2021 16:51:06 -0700 Subject: initial implementation of fileset ingest importers --- 
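Note: the new IngestFilesetResultImporter consumes sandcrawler fileset ingest
results as JSON lines. A rough sketch of a single input row, with hypothetical
values, showing only the fields read by the code added below (the embedded
'request' must additionally carry enough metadata, such as a fatcat release
ident or external identifier, for parse_ingest_release_ident() to resolve a
release, and the parent class also checks hit status and request source):

    {
      "hit": true,
      "strategy": "archiveorg-fileset",
      "ingest_strategy": "archiveorg-fileset",
      "archiveorg_item_name": "example-dataset-item",
      "file_count": 1,
      "request": {"ingest_type": "dataset", "ingest_request_source": "fatcat-ingest"},
      "manifest": [
        {"path": "data/readings.csv", "size": 12345, "md5": "<md5-hex>",
         "sha1": "<sha1-hex>", "sha256": "<sha256-hex>", "mimetype": "text/csv"}
      ]
    }

With FATCAT_AUTH_WORKER_CRAWL set, a file of such rows can be imported with
the new subcommand:

    ./fatcat_import.py ingest-fileset-results ingest_fileset_results.json
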
python/fatcat_import.py | 74 ++++++++++ python/fatcat_tools/importers/__init__.py | 2 +- python/fatcat_tools/importers/ingest.py | 225 +++++++++++++++++++++++++++++- 3 files changed, 298 insertions(+), 3 deletions(-) (limited to 'python/fatcat_tools') diff --git a/python/fatcat_import.py b/python/fatcat_import.py index 6f331aaa..41a51ad4 100755 --- a/python/fatcat_import.py +++ b/python/fatcat_import.py @@ -164,6 +164,27 @@ def run_ingest_web(args): else: JsonLinePusher(iwri, args.json_file).run() +def run_ingest_fileset(args): + ifri = IngestFilesetResultImporter(args.api, + editgroup_description=args.editgroup_description_override, + skip_source_allowlist=args.skip_source_allowlist, + do_updates=args.do_updates, + default_link_rel=args.default_link_rel, + edit_batch_size=args.batch_size) + if args.kafka_mode: + KafkaJsonPusher( + ifri, + args.kafka_hosts, + args.kafka_env, + "ingest-fileset-results", + "fatcat-{}-ingest-fileset-result".format(args.kafka_env), + kafka_namespace="sandcrawler", + consume_batch_size=args.batch_size, + force_flush=True, + ).run() + else: + JsonLinePusher(ifri, args.json_file).run() + def run_savepapernow_file(args): ifri = SavePaperNowFileImporter(args.api, editgroup_description=args.editgroup_description_override, @@ -200,6 +221,24 @@ def run_savepapernow_web(args): else: JsonLinePusher(ifri, args.json_file).run() +def run_savepapernow_fileset(args): + ifri = SavePaperNowFilesetImporter(args.api, + editgroup_description=args.editgroup_description_override, + edit_batch_size=args.batch_size) + if args.kafka_mode: + KafkaJsonPusher( + ifri, + args.kafka_hosts, + args.kafka_env, + "ingest-file-results", + "fatcat-{}-savepapernow-fileset-result".format(args.kafka_env), + kafka_namespace="sandcrawler", + consume_batch_size=args.batch_size, + force_flush=True, + ).run() + else: + JsonLinePusher(ifri, args.json_file).run() + def run_grobid_metadata(args): fmi = GrobidMetadataImporter(args.api, edit_batch_size=args.batch_size, @@ -569,6 +608,28 @@ def main(): default="web", help="default URL rel for matches (eg, 'publisher', 'web')") + sub_ingest_fileset = subparsers.add_parser('ingest-fileset-results', + help="add/update fileset entities linked to releases based on sandcrawler ingest results") + sub_ingest_fileset.set_defaults( + func=run_ingest_fileset, + auth_var="FATCAT_AUTH_WORKER_CRAWL", + ) + sub_ingest_fileset.add_argument('json_file', + help="ingest_fileset JSON file to import from", + default=sys.stdin, type=argparse.FileType('r')) + sub_ingest_fileset.add_argument('--skip-source-allowlist', + action='store_true', + help="don't filter import based on request source allowlist") + sub_ingest_fileset.add_argument('--kafka-mode', + action='store_true', + help="consume from kafka topic (not stdin)") + sub_ingest_fileset.add_argument('--do-updates', + action='store_true', + help="update pre-existing fileset entities if new match (instead of skipping)") + sub_ingest_fileset.add_argument('--default-link-rel', + default="fileset", + help="default URL rel for matches (eg, 'publisher', 'web')") + sub_savepapernow_file = subparsers.add_parser('savepapernow-file-results', help="add file entities crawled due to async Save Paper Now request") sub_savepapernow_file.set_defaults( @@ -595,6 +656,19 @@ def main(): action='store_true', help="consume from kafka topic (not stdin)") + sub_savepapernow_fileset = subparsers.add_parser('savepapernow-fileset-results', + help="add fileset entities crawled due to async Save Paper Now request") + sub_savepapernow_fileset.set_defaults( 
+ func=run_savepapernow_fileset, + auth_var="FATCAT_AUTH_WORKER_SAVEPAPERNOW", + ) + sub_savepapernow_fileset.add_argument('json_file', + help="ingest-file JSON file to import from", + default=sys.stdin, type=argparse.FileType('r')) + sub_savepapernow_fileset.add_argument('--kafka-mode', + action='store_true', + help="consume from kafka topic (not stdin)") + sub_grobid_metadata = subparsers.add_parser('grobid-metadata', help="create release and file entities based on GROBID PDF metadata extraction") sub_grobid_metadata.set_defaults( diff --git a/python/fatcat_tools/importers/__init__.py b/python/fatcat_tools/importers/__init__.py index 5da669e1..a2224081 100644 --- a/python/fatcat_tools/importers/__init__.py +++ b/python/fatcat_tools/importers/__init__.py @@ -27,7 +27,7 @@ from .orcid import OrcidImporter from .arabesque import ArabesqueMatchImporter, ARABESQUE_MATCH_WHERE_CLAUSE from .wayback_static import auto_wayback_static from .cdl_dash_dat import auto_cdl_dash_dat -from .ingest import IngestFileResultImporter, SavePaperNowFileImporter, IngestWebResultImporter, SavePaperNowWebImporter +from .ingest import IngestFileResultImporter, SavePaperNowFileImporter, IngestWebResultImporter, SavePaperNowWebImporter, IngestFilesetResultImporter, SavePaperNowFilesetImporter from .shadow import ShadowLibraryImporter from .file_meta import FileMetaImporter from .doaj_article import DoajArticleImporter diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py index bc759219..38639297 100644 --- a/python/fatcat_tools/importers/ingest.py +++ b/python/fatcat_tools/importers/ingest.py @@ -336,7 +336,7 @@ class SavePaperNowFileImporter(IngestFileResultImporter): eg_desc = kwargs.pop('editgroup_description', None) or "Files crawled after a public 'Save Paper Now' request" eg_extra = kwargs.pop('editgroup_extra', dict()) - eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFileSavePaperNow') + eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowFileImporter') kwargs['submit_mode'] = submit_mode kwargs['require_grobid'] = False kwargs['do_updates'] = False @@ -533,7 +533,7 @@ class SavePaperNowWebImporter(IngestWebResultImporter): eg_desc = kwargs.pop('editgroup_description', None) or "Webcaptures crawled after a public 'Save Paper Now' request" eg_extra = kwargs.pop('editgroup_extra', dict()) - eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestWebSavePaperNow') + eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowWebImporter') kwargs['submit_mode'] = submit_mode kwargs['do_updates'] = False super().__init__(api, @@ -573,3 +573,224 @@ class SavePaperNowWebImporter(IngestWebResultImporter): return False return True + + +class IngestFilesetResultImporter(IngestFileResultImporter): + """ + Variant of IngestFileResultImporter for processing, eg, dataset ingest + results into fileset objects. 
+ """ + + def __init__(self, api, **kwargs): + + eg_desc = kwargs.pop('editgroup_description', None) or "Filesets crawled from web using sandcrawler ingest tool" + eg_extra = kwargs.pop('editgroup_extra', dict()) + eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFilesetResultImporter') + kwargs['do_updates'] = False + super().__init__(api, + editgroup_description=eg_desc, + editgroup_extra=eg_extra, + **kwargs) + self.max_file_count = 300 + + def want_fileset(self, row): + + if not row.get('manifest') or len(row.get('manifest')) == 0: + self.counts['skip-empty-manifest'] += 1 + return False + + if len(row.get('manifest')) > self.max_file_count: + self.counts['skip-too-many-files'] += 1 + return False + + return True + + def want(self, row): + + if not self.want_ingest(row): + return False + + # fileset-specific filters + if row['request'].get('ingest_type') not in ['dataset',]: + self.counts['skip-ingest-type'] += 1 + return False + + if not self.want_fileset(row): + return False + + return True + + def parse_fileset_urls(self, row): + # XXX: create URLs and rel for dataset ingest + if not row.get('strategy'): + return [] + if row['strategy'].startswith('archiveorg') and row.get('archiveorg_item_name'): + return [ + fatcat_openapi_client.FilesetUrl( + url=f"https://archive.org/download/{row['archiveorg_item_name']}", + rel="archive", + ) + ] + elif row['strategy'].startswith('web') and row.get('web_base_url'): + return [ + fatcat_openapi_client.FilesetUrl( + url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", + rel="webarchive", + ) + ] + elif row['strategy'] == 'web-file-bundle' and row.get('web_bundle_url'): + return [ + fatcat_openapi_client.FilesetUrl( + url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}", + rel="webarchive", + ) + ] + else: + return [] + + def parse_record(self, row): + + request = row['request'] + + # double check that want() filtered request correctly + if request.get('ingest_type') not in ["dataset",]: + self.counts['skip-ingest-type'] += 1 + return None + + # identify release by fatcat ident, or extid lookup + release_ident = self.parse_ingest_release_ident(row) + + if not release_ident: + self.counts['skip-release-not-found'] += 1 + return None + + entity_extra = dict() + edit_extra = self.parse_edit_extra(row) + edit_extra['ingest_strategy'] = row['ingest_strategy'] + if row.get('platform'): + edit_extra['platform'] = row['platform'] + if row.get('platform_id'): + edit_extra['platform_id'] = row['platform_id'] + + entity_urls = self.parse_fileset_urls(row) + if not entity_urls: + self.counts['skip-no-access-url'] += 1 + return None + + assert row['file_count'] == len(row['manifest']) + if row['file_count'] > self.max_file_count: + self.counts['skip-too-many-manifest-files'] += 1 + return None + + manifest = [] + for ingest_file in row['manifest']: + fsf = fatcat_openapi_client.FilesetFile( + path=ingest_file['path'], + size=ingest_file['size'], + md5=ingest_file['md5'], + sha1=ingest_file['sha1'], + sha256=ingest_file.get('sha256'), + extra=dict( + mimetype=ingest_file['mimetype'], + ), + ) + if not (fsf.md5 and fsf.sha1 and fsf.path and fsf.size): + self.counts['skip-partial-file-info'] += 1 + return None + if ingest_file.get('platform_url'): + # XXX: should we include this? 
+ fsf.extra['original_url'] = ingest_file['platform_url'] + if ingest_file.get('terminal_url') and ingest_file.get('terminal_dt'): + fsf.extra['wayback_url'] = f"https://web.archive.org/web/{ingest_file['terminal_dt']}/{ingest_file['terminal_url']}" + manifest.append(fsf) + + fe = fatcat_openapi_client.FilesetEntity( + manifest=manifest, + urls=entity_urls, + release_ids=[release_ident], + ) + + if entity_extra: + fe.extra = entity_extra + if edit_extra: + fe.edit_extra = edit_extra + return fe + + def try_update(self, wc): + + # check for existing edits-in-progress with same URL + for other in self._entity_queue: + # XXX: how to duplicate check? + if other.original_url == wc.original_url: + self.counts['skip-in-queue'] += 1 + return False + + # lookup sha1, or create new entity (TODO: API doesn't support this yet) + #existing = None + + # NOTE: in lieu of existing checks (by lookup), only allow one fileset per release + release = self.api.get_release(wc.release_ids[0], expand="filesets") + if release.filesets: + # XXX: how to duplicate check filesets? + # check if this is an existing match, or just a similar hit + for other in release.filesets: + if wc.original_url == other.original_url: + # TODO: compare very similar timestamps of same time (different formats) + self.counts['exists'] += 1 + return False + self.counts['skip-release-has-fileset'] += 1 + return False + + return True + + def insert_batch(self, batch): + if self.submit_mode: + eg = self.api.create_editgroup(fatcat_openapi_client.Editgroup( + description=self.editgroup_description, + extra=self.editgroup_extra)) + for fe in batch: + self.api.create_fileset(eg.editgroup_id, fe) + self.api.update_editgroup(eg.editgroup_id, eg, submit=True) + else: + self.api.create_fileset_auto_batch(fatcat_openapi_client.FilesetAutoBatch( + editgroup=fatcat_openapi_client.Editgroup( + description=self.editgroup_description, + extra=self.editgroup_extra), + entity_list=batch)) + + +class SavePaperNowFilesetImporter(IngestFilesetResultImporter): + """ + Like SavePaperNowFileImporter, but for fileset/dataset ingest. 
+ """ + + def __init__(self, api, submit_mode=True, **kwargs): + + eg_desc = kwargs.pop('editgroup_description', None) or "Fileset crawled after a public 'Save Paper Now' request" + eg_extra = kwargs.pop('editgroup_extra', dict()) + eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.SavePaperNowFilesetImporter') + kwargs['submit_mode'] = submit_mode + kwargs['do_updates'] = False + super().__init__(api, + editgroup_description=eg_desc, + editgroup_extra=eg_extra, + **kwargs) + + def want(self, row): + + source = row['request'].get('ingest_request_source') + if not source: + self.counts['skip-ingest_request_source'] += 1 + return False + if not source.startswith('savepapernow'): + self.counts['skip-not-savepapernow'] += 1 + return False + + if row.get('hit') != True: + self.counts['skip-hit'] += 1 + return False + + if not self.want_fileset(row): + return False + + return True -- cgit v1.2.3 From 5eccb38074104960d88df00805d0ebd7ecf839f9 Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Tue, 12 Oct 2021 14:44:44 -0700 Subject: fileset ingest small tweaks --- python/fatcat_tools/importers/ingest.py | 57 +++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 21 deletions(-) (limited to 'python/fatcat_tools') diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py index 38639297..36d72651 100644 --- a/python/fatcat_tools/importers/ingest.py +++ b/python/fatcat_tools/importers/ingest.py @@ -624,30 +624,45 @@ class IngestFilesetResultImporter(IngestFileResultImporter): # XXX: create URLs and rel for dataset ingest if not row.get('strategy'): return [] - if row['strategy'].startswith('archiveorg') and row.get('archiveorg_item_name'): - return [ - fatcat_openapi_client.FilesetUrl( - url=f"https://archive.org/download/{row['archiveorg_item_name']}", - rel="archive", - ) - ] - elif row['strategy'].startswith('web') and row.get('web_base_url'): - return [ - fatcat_openapi_client.FilesetUrl( - url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", - rel="webarchive", - ) - ] - elif row['strategy'] == 'web-file-bundle' and row.get('web_bundle_url'): - return [ - fatcat_openapi_client.FilesetUrl( - url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}", - rel="webarchive", - ) - ] + urls = [] + if row['strategy'] == 'archiveorg-fileset' and row.get('archiveorg_item_name'): + urls.append(fatcat_openapi_client.FilesetUrl( + url=f"https://archive.org/download/{row['archiveorg_item_name']}/", + rel="archive", + )) + elif row['strategy'] == 'archiveorg-file-hundle' and row.get('archiveorg_item_name'): + # XXX: what is the filename of bundle? 
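Note: with this change parse_fileset_urls() accumulates multiple FilesetUrl
entries per result instead of returning a single-element list, and original
platform URLs are kept alongside the archival copies. As a rough sketch (the
item name and platform bundle URL are hypothetical), an 'archiveorg-fileset'
result that also carries a 'platform_bundle_url' would now yield roughly:

    [
        fatcat_openapi_client.FilesetUrl(
            url="https://archive.org/download/example-dataset-item/",
            rel="archive",
        ),
        fatcat_openapi_client.FilesetUrl(
            url="https://example-repository.org/download/dataset-123.zip",
            rel="repository-bundle",
        ),
    ]
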
+ urls.append(fatcat_openapi_client.FilesetUrl( + url=f"https://archive.org/download/{row['archiveorg_item_name']}/", + rel="archive", + )) + elif row['strategy'].startswith('web') and row.get('platform_base_url'): + urls.append(fatcat_openapi_client.FilesetUrl( + url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", + rel="webarchive", + )) + elif row['strategy'] == 'web-file-bundle' and row.get('platform_bundle_url'): + urls.append(fatcat_openapi_client.FilesetUrl( + url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}", + rel="webarchive", + )) else: + # if no archival URLs, bail out return [] + # add any additional / platform URLs here + if row.get('platform_bundle_url'): + urls.append(fatcat_openapi_client.FilesetUrl( + url=row['platform_bundle_url'], + rel="repository-bundle", + )) + if row.get('platform_base_url'): + urls.append(fatcat_openapi_client.FilesetUrl( + url=row['platform_bundle_url'], + rel="repository", + )) + return urls + def parse_record(self, row): request = row['request'] -- cgit v1.2.3 From c883f5d4b02b67c1af7d9cafe484ead85f02b97b Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Thu, 14 Oct 2021 17:29:11 -0700 Subject: WIP: rel fixes --- python/fatcat_tools/importers/ingest.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'python/fatcat_tools') diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py index 36d72651..301be3ef 100644 --- a/python/fatcat_tools/importers/ingest.py +++ b/python/fatcat_tools/importers/ingest.py @@ -628,23 +628,23 @@ class IngestFilesetResultImporter(IngestFileResultImporter): if row['strategy'] == 'archiveorg-fileset' and row.get('archiveorg_item_name'): urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://archive.org/download/{row['archiveorg_item_name']}/", - rel="archive", + rel="archive-base", )) - elif row['strategy'] == 'archiveorg-file-hundle' and row.get('archiveorg_item_name'): + elif row['strategy'] == 'archiveorg-fileset-bundle' and row.get('archiveorg_item_name'): # XXX: what is the filename of bundle? 
urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://archive.org/download/{row['archiveorg_item_name']}/", - rel="archive", + rel="archive-bundle", )) elif row['strategy'].startswith('web') and row.get('platform_base_url'): urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", rel="webarchive", )) - elif row['strategy'] == 'web-file-bundle' and row.get('platform_bundle_url'): + elif row['strategy'] == 'web-fileset-bundle' and row.get('platform_bundle_url'): urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}", - rel="webarchive", + rel="webarchive-bundle", )) else: # if no archival URLs, bail out @@ -659,7 +659,7 @@ class IngestFilesetResultImporter(IngestFileResultImporter): if row.get('platform_base_url'): urls.append(fatcat_openapi_client.FilesetUrl( url=row['platform_bundle_url'], - rel="repository", + rel="repository-base", )) return urls -- cgit v1.2.3 From 6184ecca3a2e072c11482020938566dc8841bf52 Mon Sep 17 00:00:00 2001 From: Bryan Newbold Date: Mon, 18 Oct 2021 10:17:54 -0700 Subject: WIP: more fileset ingest --- python/fatcat_tools/importers/ingest.py | 34 ++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'python/fatcat_tools') diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py index 301be3ef..288c4cff 100644 --- a/python/fatcat_tools/importers/ingest.py +++ b/python/fatcat_tools/importers/ingest.py @@ -64,6 +64,9 @@ class IngestFileResultImporter(EntityImporter): "application/jats+xml", "application/tei+xml", "text/xml"): self.counts['skip-mimetype'] += 1 return False + elif row['request'].get('ingest_type') in ['component', 'src', 'dataset-file']: + # we rely on sandcrawler for these checks + pass else: self.counts['skip-ingest-type'] += 1 return False @@ -599,6 +602,10 @@ class IngestFilesetResultImporter(IngestFileResultImporter): self.counts['skip-empty-manifest'] += 1 return False + if len(row.get('manifest')) == 1: + self.counts['skip-single-file'] += 1 + return False + if len(row.get('manifest')) > self.max_file_count: self.counts['skip-too-many-files'] += 1 return False @@ -621,34 +628,35 @@ class IngestFilesetResultImporter(IngestFileResultImporter): return True def parse_fileset_urls(self, row): - # XXX: create URLs and rel for dataset ingest if not row.get('strategy'): return [] + strategy = row['strategy'] urls = [] - if row['strategy'] == 'archiveorg-fileset' and row.get('archiveorg_item_name'): + if strategy == 'archiveorg-fileset' and row.get('archiveorg_item_name'): urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://archive.org/download/{row['archiveorg_item_name']}/", rel="archive-base", )) - elif row['strategy'] == 'archiveorg-fileset-bundle' and row.get('archiveorg_item_name'): - # XXX: what is the filename of bundle? 
+ if row['strategy'].startswith('web-') and row.get('platform_base_url'): urls.append(fatcat_openapi_client.FilesetUrl( - url=f"https://archive.org/download/{row['archiveorg_item_name']}/", - rel="archive-bundle", + url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", + rel="webarchive-base", )) - elif row['strategy'].startswith('web') and row.get('platform_base_url'): + # TODO: repository-base + # TODO: web-base + + if row['strategy'] == 'archiveorg-fileset-bundle' and row.get('archiveorg_item_name'): + # TODO: bundle path urls.append(fatcat_openapi_client.FilesetUrl( - url=f"https://web.archive.org/web/{row['web_base_url_dt']}/{row['web_base_url']}", - rel="webarchive", + url=f"https://archive.org/download/{row['archiveorg_item_name']}/{bundle_path}", + rel="archive-bundle", )) - elif row['strategy'] == 'web-fileset-bundle' and row.get('platform_bundle_url'): + + if row['strategy'] == 'web-fileset-bundle' and row.get('platform_bundle_url'): urls.append(fatcat_openapi_client.FilesetUrl( url=f"https://web.archive.org/web/{row['web_bundle_url_dt']}/{row['web_bundle_url']}", rel="webarchive-bundle", )) - else: - # if no archival URLs, bail out - return [] # add any additional / platform URLs here if row.get('platform_bundle_url'): -- cgit v1.2.3
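
Note: at the end of this series the 'rel' vocabulary produced by
parse_fileset_urls() separates base directories from single-file bundles:
'archive-base' and 'archive-bundle' for archive.org items (the bundle path is
still a TODO), 'webarchive-base' and 'webarchive-bundle' for web.archive.org
captures, and 'repository-bundle' and 'repository-base' for original platform
URLs. A rough sketch of a test exercising the archive.org base case, assuming
the existing pytest 'api' fixture and a hypothetical item name:

    from fatcat_tools.importers import IngestFilesetResultImporter

    def test_parse_fileset_urls_archiveorg(api):
        importer = IngestFilesetResultImporter(api)
        row = {
            "strategy": "archiveorg-fileset",
            "archiveorg_item_name": "example-dataset-item",
        }
        urls = importer.parse_fileset_urls(row)
        # single archival URL pointing at the item download directory
        assert len(urls) == 1
        assert urls[0].url == "https://archive.org/download/example-dataset-item/"
        assert urls[0].rel == "archive-base"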