From 6970c63e2f111023be29b34e36c929dc0da5f70f Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Fri, 27 Jul 2018 23:37:18 +0000
Subject: add 'please' command for crossref matching

---
 please | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

(limited to 'please')

diff --git a/please b/please
index a244b80..3563343 100755
--- a/please
+++ b/please
@@ -116,6 +116,29 @@ def run_statuscount(args):
             env=args.env)
     subprocess.call(cmd, shell=True)
 
+def run_matchcrossref(args):
+    if args.rebuild:
+        rebuild_scalding()
+    print("Starting matchcrossref job...")
+    output = "{}/output-{}/{}-matchcrossref".format(
+        HDFS_DIR,
+        args.env,
+        datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"))
+    cmd = """hadoop jar \
+        scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
+        com.twitter.scalding.Tool sandcrawler.HBaseCrossrefScoreJob \
+        --hdfs \
+        --app.conf.path scalding/ia_cluster.conf \
+        --hbase-table wbgrp-journal-extract-0-{env} \
+        --zookeeper-hosts {zookeeper_hosts} \
+        --crossref-input {crossref_input} \
+        --output {output}""".format(
+            output=output,
+            zookeeper_hosts=ZOOKEEPER_HOSTS,
+            env=args.env,
+            crossref_input=args.crossref_input)
+    subprocess.call(cmd, shell=True)
+
 def main():
     parser = argparse.ArgumentParser()
 
@@ -146,6 +169,11 @@ def main():
     sub_statuscount = subparsers.add_parser('status-count')
     sub_statuscount.set_defaults(func=run_statuscount)
 
+    sub_matchcrossref = subparsers.add_parser('match-crossref')
+    sub_matchcrossref.set_defaults(func=run_matchcrossref)
+    sub_matchcrossref.add_argument('crossref_input',
+        help="full HDFS path of Crossref JSON dump")
+
     args = parser.parse_args()
     if not args.__dict__.get("func"):
         print("tell me what to do! (try --help)")
--
cgit v1.2.3

From fafe5b1b2d8f34c6f336b7ae1a48cc78deb90c11 Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Wed, 15 Aug 2018 19:10:13 -0700
Subject: update 'please' command for scoring refactor

---
 please | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'please')

diff --git a/please b/please
index 3563343..1a992f2 100755
--- a/please
+++ b/please
@@ -124,9 +124,13 @@ def run_matchcrossref(args):
         HDFS_DIR,
         args.env,
         datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"))
+    # Notes: -D options must come after Tool but before class name
+    # https://github.com/twitter/scalding/wiki/Frequently-asked-questions#how-do-i-pass-parameters-to-my-hadoop-job-number-of-reducers--memory-options--etc-
     cmd = """hadoop jar \
         scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
-        com.twitter.scalding.Tool sandcrawler.HBaseCrossrefScoreJob \
+        com.twitter.scalding.Tool \
+        -Dmapred.reduce.tasks={reducers} \
+        sandcrawler.ScoreJob \
         --hdfs \
         --app.conf.path scalding/ia_cluster.conf \
         --hbase-table wbgrp-journal-extract-0-{env} \
@@ -136,6 +140,7 @@ def run_matchcrossref(args):
             output=output,
             zookeeper_hosts=ZOOKEEPER_HOSTS,
             env=args.env,
+            reducers=args.reducers,
             crossref_input=args.crossref_input)
     subprocess.call(cmd, shell=True)
 
@@ -173,6 +178,10 @@ def main():
     sub_matchcrossref.set_defaults(func=run_matchcrossref)
     sub_matchcrossref.add_argument('crossref_input',
         help="full HDFS path of Crossref JSON dump")
+    sub_matchcrossref.add_argument('--reducers',
+        help="number of reducers to run",
+        type=int, default=30)
+
     args = parser.parse_args()
     if not args.__dict__.get("func"):
--
cgit v1.2.3
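
Taken together, the two patches above leave the 'please' wrapper with a 'match-crossref' subcommand that assembles a `hadoop jar ... com.twitter.scalding.Tool -Dmapred.reduce.tasks=N sandcrawler.ScoreJob ...` command line and shells out to it. The following is a minimal, self-contained sketch of that end state for readers who do not want to replay the diffs; the HDFS_DIR and ZOOKEEPER_HOSTS values and the --env handling are illustrative placeholders, not copied from the real script, and the --rebuild/rebuild_scalding step is omitted.

#!/usr/bin/env python3
# Sketch only: HDFS_DIR, ZOOKEEPER_HOSTS, and the --env option are assumptions;
# the actual 'please' script defines its own environment and rebuild handling.

import argparse
import subprocess
from datetime import datetime

HDFS_DIR = "hdfs:///user/example/sandcrawler"  # placeholder
ZOOKEEPER_HOSTS = "zk1.example.org:2181"       # placeholder

def run_matchcrossref(args):
    print("Starting matchcrossref job...")
    output = "{}/output-{}/{}-matchcrossref".format(
        HDFS_DIR,
        args.env,
        datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"))
    # -D properties must come after com.twitter.scalding.Tool but before the
    # job class name (see the Scalding FAQ link in the second commit).
    cmd = """hadoop jar \
        scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
        com.twitter.scalding.Tool \
        -Dmapred.reduce.tasks={reducers} \
        sandcrawler.ScoreJob \
        --hdfs \
        --app.conf.path scalding/ia_cluster.conf \
        --hbase-table wbgrp-journal-extract-0-{env} \
        --zookeeper-hosts {zookeeper_hosts} \
        --crossref-input {crossref_input} \
        --output {output}""".format(
            output=output,
            zookeeper_hosts=ZOOKEEPER_HOSTS,
            env=args.env,
            reducers=args.reducers,
            crossref_input=args.crossref_input)
    subprocess.call(cmd, shell=True)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', default='qa')  # assumption, for illustration
    subparsers = parser.add_subparsers()
    sub = subparsers.add_parser('match-crossref')
    sub.set_defaults(func=run_matchcrossref)
    sub.add_argument('crossref_input',
        help="full HDFS path of Crossref JSON dump")
    sub.add_argument('--reducers',
        help="number of reducers to run",
        type=int, default=30)
    args = parser.parse_args()
    if not args.__dict__.get("func"):
        print("tell me what to do! (try --help)")
        return
    args.func(args)

if __name__ == '__main__':
    main()

Assuming that wiring, a typical invocation would look something like `./please match-crossref /user/example/crossref-works.json --reducers 50` (the dump path here is made up): the Crossref JSON dump path is positional, and --reducers overrides the default of 30, which the script passes through as mapred.reduce.tasks.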