about summary refs log tree commit diff stats
path: root/python/fatcat_import.py
diff options
context:
space:
mode:
author: Bryan Newbold <bnewbold@robocracy.org> 2021-10-11 16:51:06 -0700
committer: Bryan Newbold <bnewbold@robocracy.org> 2021-10-14 18:11:12 -0700
commit 75baf7d423a2cb119bd485672a00fd664e32537c (patch)
tree 8655162548fb4c8befc580ef006b38bda4899167 /python/fatcat_import.py
parent c0c9d4da83b027b081eab364bfc7b807dbe9a2e5 (diff)
download fatcat-75baf7d423a2cb119bd485672a00fd664e32537c.tar.gz
download fatcat-75baf7d423a2cb119bd485672a00fd664e32537c.zip
initial implementation of fileset ingest importers
Diffstat (limited to 'python/fatcat_import.py')
-rwxr-xr-x  python/fatcat_import.py | 74
1 file changed, 74 insertions(+), 0 deletions(-)
diff --git a/python/fatcat_import.py b/python/fatcat_import.py
index 6f331aaa..41a51ad4 100755
--- a/python/fatcat_import.py
+++ b/python/fatcat_import.py
@@ -164,6 +164,27 @@ def run_ingest_web(args):
else:
JsonLinePusher(iwri, args.json_file).run()
+def run_ingest_fileset(args):
+ ifri = IngestFilesetResultImporter(args.api,
+ editgroup_description=args.editgroup_description_override,
+ skip_source_allowlist=args.skip_source_allowlist,
+ do_updates=args.do_updates,
+ default_link_rel=args.default_link_rel,
+ edit_batch_size=args.batch_size)
+ if args.kafka_mode:
+ KafkaJsonPusher(
+ ifri,
+ args.kafka_hosts,
+ args.kafka_env,
+ "ingest-fileset-results",
+ "fatcat-{}-ingest-fileset-result".format(args.kafka_env),
+ kafka_namespace="sandcrawler",
+ consume_batch_size=args.batch_size,
+ force_flush=True,
+ ).run()
+ else:
+ JsonLinePusher(ifri, args.json_file).run()
+
def run_savepapernow_file(args):
ifri = SavePaperNowFileImporter(args.api,
editgroup_description=args.editgroup_description_override,
@@ -200,6 +221,24 @@ def run_savepapernow_web(args):
else:
JsonLinePusher(ifri, args.json_file).run()
+def run_savepapernow_fileset(args):
+ ifri = SavePaperNowFilesetImporter(args.api,
+ editgroup_description=args.editgroup_description_override,
+ edit_batch_size=args.batch_size)
+ if args.kafka_mode:
+ KafkaJsonPusher(
+ ifri,
+ args.kafka_hosts,
+ args.kafka_env,
+ "ingest-file-results",
+ "fatcat-{}-savepapernow-fileset-result".format(args.kafka_env),
+ kafka_namespace="sandcrawler",
+ consume_batch_size=args.batch_size,
+ force_flush=True,
+ ).run()
+ else:
+ JsonLinePusher(ifri, args.json_file).run()
+
def run_grobid_metadata(args):
fmi = GrobidMetadataImporter(args.api,
edit_batch_size=args.batch_size,
@@ -569,6 +608,28 @@ def main():
default="web",
help="default URL rel for matches (eg, 'publisher', 'web')")
+ sub_ingest_fileset = subparsers.add_parser('ingest-fileset-results',
+ help="add/update fileset entities linked to releases based on sandcrawler ingest results")
+ sub_ingest_fileset.set_defaults(
+ func=run_ingest_fileset,
+ auth_var="FATCAT_AUTH_WORKER_CRAWL",
+ )
+ sub_ingest_fileset.add_argument('json_file',
+ help="ingest_fileset JSON file to import from",
+ default=sys.stdin, type=argparse.FileType('r'))
+ sub_ingest_fileset.add_argument('--skip-source-allowlist',
+ action='store_true',
+ help="don't filter import based on request source allowlist")
+ sub_ingest_fileset.add_argument('--kafka-mode',
+ action='store_true',
+ help="consume from kafka topic (not stdin)")
+ sub_ingest_fileset.add_argument('--do-updates',
+ action='store_true',
+ help="update pre-existing fileset entities if new match (instead of skipping)")
+ sub_ingest_fileset.add_argument('--default-link-rel',
+ default="fileset",
+ help="default URL rel for matches (eg, 'publisher', 'web')")
+
sub_savepapernow_file = subparsers.add_parser('savepapernow-file-results',
help="add file entities crawled due to async Save Paper Now request")
sub_savepapernow_file.set_defaults(
@@ -595,6 +656,19 @@ def main():
action='store_true',
help="consume from kafka topic (not stdin)")
+ sub_savepapernow_fileset = subparsers.add_parser('savepapernow-fileset-results',
+ help="add fileset entities crawled due to async Save Paper Now request")
+ sub_savepapernow_fileset.set_defaults(
+ func=run_savepapernow_fileset,
+ auth_var="FATCAT_AUTH_WORKER_SAVEPAPERNOW",
+ )
+ sub_savepapernow_fileset.add_argument('json_file',
+ help="ingest-file JSON file to import from",
+ default=sys.stdin, type=argparse.FileType('r'))
+ sub_savepapernow_fileset.add_argument('--kafka-mode',
+ action='store_true',
+ help="consume from kafka topic (not stdin)")
+
sub_grobid_metadata = subparsers.add_parser('grobid-metadata',
help="create release and file entities based on GROBID PDF metadata extraction")
sub_grobid_metadata.set_defaults(