author     Bryan Newbold <bnewbold@robocracy.org>    2022-04-07 14:44:01 -0700
committer  Bryan Newbold <bnewbold@robocracy.org>    2022-04-07 14:44:01 -0700
commit     ede98644a89afd15d903061e0998dbd08851df6d (patch)
tree       17c54c5764adb2f5d67aa750174f635e0fb1cdc8 /python/fatcat_import.py
parent     2ef72e0c769e94401568ab42def30ddb5268fa98 (diff)
parent     0aaa2a839d7a14716ee1a84b730203a7953dc5e0 (diff)
Merge branch 'bnewbold-dataset-ingest-fixes'
Diffstat (limited to 'python/fatcat_import.py')
-rwxr-xr-x  python/fatcat_import.py  58
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/python/fatcat_import.py b/python/fatcat_import.py
index f502d4ed..2896577e 100755
--- a/python/fatcat_import.py
+++ b/python/fatcat_import.py
@@ -25,6 +25,7 @@ from fatcat_tools.importers import (
     FilesetImporter,
     GrobidMetadataImporter,
     IngestFileResultImporter,
+    IngestFilesetFileResultImporter,
     IngestFilesetResultImporter,
     IngestWebResultImporter,
     JalcImporter,
@@ -232,6 +233,30 @@ def run_ingest_fileset(args: argparse.Namespace) -> None:
         JsonLinePusher(ifri, args.json_file).run()


+def run_ingest_fileset_file(args: argparse.Namespace) -> None:
+    ifri = IngestFilesetFileResultImporter(
+        args.api,
+        editgroup_description=args.editgroup_description_override,
+        skip_source_allowlist=args.skip_source_allowlist,
+        do_updates=args.do_updates,
+        default_link_rel=args.default_link_rel,
+        edit_batch_size=args.batch_size,
+    )
+    if args.kafka_mode:
+        KafkaJsonPusher(
+            ifri,
+            args.kafka_hosts,
+            args.kafka_env,
+            "ingest-fileset-results",
+            "fatcat-{}-ingest-fileset-result".format(args.kafka_env),
+            kafka_namespace="sandcrawler",
+            consume_batch_size=args.batch_size,
+            force_flush=True,
+        ).run()
+    else:
+        JsonLinePusher(ifri, args.json_file).run()
+
+
 def run_savepapernow_file(args: argparse.Namespace) -> None:
     ifri = SavePaperNowFileImporter(
         args.api,
@@ -750,6 +775,39 @@ def main() -> None:
         help="default URL rel for matches (eg, 'publisher', 'web')",
     )

+    sub_ingest_fileset_file = subparsers.add_parser(
+        "ingest-fileset-file-results",
+        help="add/update file entities linked to releases based on sandcrawler dataset/fileset ingest results",
+    )
+    sub_ingest_fileset_file.set_defaults(
+        func=run_ingest_fileset_file,
+        auth_var="FATCAT_AUTH_WORKER_CRAWL",
+    )
+    sub_ingest_fileset_file.add_argument(
+        "json_file",
+        help="ingest_fileset JSON file to import from",
+        default=sys.stdin,
+        type=argparse.FileType("r"),
+    )
+    sub_ingest_fileset_file.add_argument(
+        "--skip-source-allowlist",
+        action="store_true",
+        help="don't filter import based on request source allowlist",
+    )
+    sub_ingest_fileset_file.add_argument(
+        "--kafka-mode", action="store_true", help="consume from kafka topic (not stdin)"
+    )
+    sub_ingest_fileset_file.add_argument(
+        "--do-updates",
+        action="store_true",
+        help="update pre-existing fileset entities if new match (instead of skipping)",
+    )
+    sub_ingest_fileset_file.add_argument(
+        "--default-link-rel",
+        default="fileset",
+        help="default URL rel for matches (eg, 'publisher', 'web')",
+    )
+
     sub_savepapernow_file = subparsers.add_parser(
         "savepapernow-file-results",
         help="add file entities crawled due to async Save Paper Now request",
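
For context, a minimal sketch of the programmatic equivalent of the new subcommand's non-Kafka branch (roughly `./fatcat_import.py ingest-fileset-file-results -`, reading newline-delimited JSON from stdin). Only the importer and pusher names come from the diff above; the authenticated_api() helper and the localhost endpoint are assumptions based on general fatcat_tools conventions, not part of this commit:

    # Sketch only, mirroring run_ingest_fileset_file() without --kafka-mode.
    import os
    import sys

    from fatcat_tools import authenticated_api  # assumed helper, not from this diff
    from fatcat_tools.importers import IngestFilesetFileResultImporter, JsonLinePusher

    # Per the set_defaults() above, this subcommand authenticates with the
    # FATCAT_AUTH_WORKER_CRAWL token.
    api = authenticated_api(
        "http://localhost:9411/v0",  # assumed local API endpoint
        token=os.environ["FATCAT_AUTH_WORKER_CRAWL"],
    )
    ifri = IngestFilesetFileResultImporter(
        api,
        default_link_rel="fileset",  # same default as the new --default-link-rel flag
        edit_batch_size=100,         # corresponds to the CLI --batch-size option
    )
    JsonLinePusher(ifri, sys.stdin).run()  # json_file defaults to sys.stdin in the CLI

With --kafka-mode set, the same importer is instead fed by a KafkaJsonPusher consuming the "ingest-fileset-results" topic in the "sandcrawler" namespace, with a per-environment "fatcat-{env}-ingest-fileset-result" consumer group, as shown in the diff.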