Diffstat (limited to 'please')
-rwxr-xr-x  please  37
1 file changed, 37 insertions, 0 deletions
diff --git a/please b/please
index a244b80..1a992f2 100755
--- a/please
+++ b/please
@@ -116,6 +116,34 @@ def run_statuscount(args):
env=args.env)
subprocess.call(cmd, shell=True)
+def run_matchcrossref(args):
+ if args.rebuild:
+ rebuild_scalding()
+ print("Starting matchcrossref job...")
+ output = "{}/output-{}/{}-matchcrossref".format(
+ HDFS_DIR,
+ args.env,
+ datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"))
+ # Notes: -D options must come after Tool but before class name
+ # https://github.com/twitter/scalding/wiki/Frequently-asked-questions#how-do-i-pass-parameters-to-my-hadoop-job-number-of-reducers--memory-options--etc-
+ cmd = """hadoop jar \
+ scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
+ com.twitter.scalding.Tool \
+ -Dmapred.reduce.tasks={reducers} \
+ sandcrawler.ScoreJob \
+ --hdfs \
+ --app.conf.path scalding/ia_cluster.conf \
+ --hbase-table wbgrp-journal-extract-0-{env} \
+ --zookeeper-hosts {zookeeper_hosts} \
+ --crossref-input {crossref_input} \
+ --output {output}""".format(
+ output=output,
+ zookeeper_hosts=ZOOKEEPER_HOSTS,
+ env=args.env,
+ reducers=args.reducers,
+ crossref_input=args.crossref_input)
+ subprocess.call(cmd, shell=True)
+
def main():
parser = argparse.ArgumentParser()
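For reference, with the default of 30 reducers the format() call above assembles a single shell command roughly like the following (the env value, zookeeper host, dump path, and timestamp shown here are placeholders, not values from this change); note that the -Dmapred.reduce.tasks option sits after com.twitter.scalding.Tool but before the sandcrawler.ScoreJob class name, as the Scalding FAQ linked in the comment requires:

    hadoop jar \
        scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
        com.twitter.scalding.Tool \
        -Dmapred.reduce.tasks=30 \
        sandcrawler.ScoreJob \
        --hdfs \
        --app.conf.path scalding/ia_cluster.conf \
        --hbase-table wbgrp-journal-extract-0-qa \
        --zookeeper-hosts zk1.example.org \
        --crossref-input hdfs:///user/example/crossref-works.json \
        --output <HDFS_DIR>/output-qa/2018-08-01-1200.00-matchcrossref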
@@ -146,6 +174,15 @@ def main():
sub_statuscount = subparsers.add_parser('status-count')
sub_statuscount.set_defaults(func=run_statuscount)
+ sub_matchcrossref = subparsers.add_parser('match-crossref')
+ sub_matchcrossref.set_defaults(func=run_matchcrossref)
+ sub_matchcrossref.add_argument('crossref_input',
+ help="full HDFS path of Crossref JSON dump")
+ sub_matchcrossref.add_argument('--reducers',
+ help="number of reducers to run",
+ type=int, default=30)
+
+
args = parser.parse_args()
if not args.__dict__.get("func"):
print("tell me what to do! (try --help)")