author | Bryan Newbold <bnewbold@archive.org> | 2018-08-20 18:50:07 -0700 |
---|---|---|
committer | Bryan Newbold <bnewbold@archive.org> | 2018-08-21 21:25:56 -0700 |
commit | 7c49fecd01cbd89bb5987442b89a4aafc1186ff9 (patch) | |
tree | c73d532fde19e2492fff573edb1d0ef10d7b084f | |
parent | 9514b6a3620a98e4fc069ca31b77eac6f9c98bec (diff) | |
download | sandcrawler-7c49fecd01cbd89bb5987442b89a4aafc1186ff9.tar.gz sandcrawler-7c49fecd01cbd89bb5987442b89a4aafc1186ff9.zip |
make col counter generic
-rwxr-xr-x | please | 28 |
-rw-r--r-- | scalding/src/main/scala/sandcrawler/HBaseColCountJob.scala (renamed from scalding/src/main/scala/sandcrawler/GrobidMetadataCountJob.scala) | 13 |
2 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/please b/please
--- a/please
+++ b/please
@@ -165,6 +165,30 @@ def run_grobidscorabledump(args):
         env=args.env)
     subprocess.call(cmd, shell=True)
 
+def run_colcount(args):
+    if args.rebuild:
+        rebuild_scalding()
+    print("Starting colcount job...")
+    output = "{}/output-{}/{}-colcount-{}".format(
+        HDFS_DIR,
+        args.env,
+        datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"),
+        args.column)
+    cmd = """hadoop jar \
+        scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
+        com.twitter.scalding.Tool sandcrawler.HBaseColCountJob \
+        --hdfs \
+        --app.conf.path scalding/ia_cluster.conf \
+        --hbase-table wbgrp-journal-extract-0-{env} \
+        --zookeeper-hosts {zookeeper_hosts} \
+        --column {column} \
+        --output {output}""".format(
+            column=args.column,
+            output=output,
+            zookeeper_hosts=ZOOKEEPER_HOSTS,
+            env=args.env)
+    subprocess.call(cmd, shell=True)
+
 def main():
     parser = argparse.ArgumentParser()
 
@@ -206,6 +230,10 @@ def main():
     sub_grobidscorabledump = subparsers.add_parser('grobid-scorable-dump')
     sub_grobidscorabledump.set_defaults(func=run_grobidscorabledump)
 
+    sub_colcount = subparsers.add_parser('col-count')
+    sub_colcount.set_defaults(func=run_colcount)
+    sub_colcount.add_argument('column',
+        help="column name to use in count")
 
     args = parser.parse_args()
     if not args.__dict__.get("func"):
diff --git a/scalding/src/main/scala/sandcrawler/GrobidMetadataCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseColCountJob.scala
index 08f3340..a007339 100644
--- a/scalding/src/main/scala/sandcrawler/GrobidMetadataCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseColCountJob.scala
@@ -10,27 +10,28 @@ import parallelai.spyglass.hbase.HBaseConstants.SourceMode
 import parallelai.spyglass.hbase.HBasePipeConversions
 import parallelai.spyglass.hbase.HBaseSource
 
-class GrobidMetadataCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+class HBaseColCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
 
   val output = args("output")
 
-  GrobidMetadataCountJob.getHBaseSource(
+  HBaseColCountJob.getHBaseSource(
     args("hbase-table"),
-    args("zookeeper-hosts"))
+    args("zookeeper-hosts"),
+    args("column"))
     .read
     .debug
     .groupAll { _.size('count) }
     .write(Tsv(output))
 }
 
-object GrobidMetadataCountJob {
+object HBaseColCountJob {
 
   // eg, "wbgrp-journal-extract-0-qa", "mtrcs-zk1.us.archive.org:2181"
-  def getHBaseSource(hbaseTable: String, zookeeperHosts: String) : HBaseSource = {
+  def getHBaseSource(hbaseTable: String, zookeeperHosts: String, col: String) : HBaseSource = {
     HBaseBuilder.build(
       hbaseTable,
       zookeeperHosts,
-      List("grobid0:metadata"),
+      List(col),
       SourceMode.SCAN_ALL)
   }
 }
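For context, the renamed job keeps the same counting pattern as before: scan the configured column, collapse all rows into a single group with `groupAll`, and write that group's size as a one-row TSV. Below is a minimal sketch of the same pattern using scalding's fields API against a plain `Tsv` source, so it can be exercised without an HBase cluster; the `TsvRowCountJob` name and its `input`/`output` arguments are illustrative and not part of this repo.

```scala
package sandcrawler

import com.twitter.scalding._

// Hypothetical stand-alone illustration (not part of this commit) of the
// counting pattern HBaseColCountJob uses: read a source, collapse every
// row into one global group, and emit that group's size.
class TsvRowCountJob(args: Args) extends Job(args) {

  Tsv(args("input"), ('key, 'value))
    .read
    .groupAll { _.size('count) }  // single group over all rows; 'count holds the total
    .write(Tsv(args("output")))
}
```

Once deployed, a count of, e.g., the `grobid0:metadata` column (the value previously hard-coded into the job) would presumably be kicked off with the new subcommand as `./please col-count grobid0:metadata`, plus whatever global flags (environment selection, `--rebuild`) the `please` script already takes.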