author     Bryan Newbold <bnewbold@archive.org>  2018-08-24 17:01:58 -0700
committer  Bryan Newbold <bnewbold@archive.org>  2018-08-24 17:01:58 -0700
commit     4f1c04d6642b883be0c1d9f0e48a83932efc6411 (patch)
tree       2172e8376b3e299f3a52750ee166ce5833284725
parent     b5a3515efc5e3a7f838a3b59534eb85a279c79a9 (diff)
parent     9557323addb8ab0b7f84dc404e8ef3d76243f13f (diff)
Merge branch 'bnewbold-missing-column'
Manually Resolved Conflicts: please
-rwxr-xr-x  please                                                           29
-rw-r--r--  scalding/src/main/scala/sandcrawler/MissingColumnDumpJob.scala   67
2 files changed, 96 insertions(+), 0 deletions(-)
diff --git a/please b/please
index de712b9..c888bbc 100755
--- a/please
+++ b/please
@@ -233,6 +233,30 @@ def run_matchbenchmark(args):
         right_bibjson=args.right_bibjson)
     subprocess.call(cmd, shell=True)
 
+def run_keysmissingcol(args):
+    if args.rebuild:
+        rebuild_scalding()
+    print("Starting keysmissingcol job...")
+    output = "{}/output-{}/{}-keysmissingcol-{}".format(
+        HDFS_DIR,
+        args.env,
+        datetime.strftime(datetime.now(), "%Y-%m-%d-%H%M.%S"),
+        args.column.replace(":", "_"))
+    cmd = """hadoop jar \
+        scalding/target/scala-2.11/sandcrawler-assembly-0.2.0-SNAPSHOT.jar \
+        com.twitter.scalding.Tool sandcrawler.MissingColumnDumpJob \
+        --hdfs \
+        --app.conf.path scalding/ia_cluster.conf \
+        --hbase-table wbgrp-journal-extract-0-{env} \
+        --zookeeper-hosts {zookeeper_hosts} \
+        --column {column} \
+        --output {output}""".format(
+        output=output,
+        zookeeper_hosts=ZOOKEEPER_HOSTS,
+        column=args.column,
+        env=args.env)
+    subprocess.call(cmd, shell=True)
+
 def main():
     parser = argparse.ArgumentParser()
@@ -291,6 +315,11 @@ def main():
     sub_matchbenchmark.add_argument('output',
         help="where to write output")
 
+    sub_keysmissingcol = subparsers.add_parser('keys-missing-col')
+    sub_keysmissingcol.set_defaults(func=run_keysmissingcol)
+    sub_keysmissingcol.add_argument('column',
+        help="column to SCAN for missing keys")
+
     args = parser.parse_args()
     if not args.__dict__.get("func"):
         print("tell me what to do! (try --help)")
diff --git a/scalding/src/main/scala/sandcrawler/MissingColumnDumpJob.scala b/scalding/src/main/scala/sandcrawler/MissingColumnDumpJob.scala
new file mode 100644
index 0000000..cc3bf23
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/MissingColumnDumpJob.scala
@@ -0,0 +1,67 @@
+package sandcrawler
+
+import java.util.Properties
+
+import cascading.property.AppProps
+import cascading.tuple.Fields
+import com.twitter.scalding._
+import com.twitter.scalding.typed.TDsl._
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import parallelai.spyglass.hbase.HBasePipeConversions
+import parallelai.spyglass.hbase.HBaseSource
+
+// This nasty, no-good, horrible Job outputs a list of keys ("sha1:A234...")
+// for which the given "column" does not have a value set.
+// It does this using a self-join because SpyGlass's HBase SCAN support seems
+// to be extremely limited.
+class MissingColumnDumpJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+
+  val output = args("output")
+
+  val allKeys : TypedPipe[String] = MissingColumnDumpJob.getHBaseKeySource(
+    args("hbase-table"),
+    args("zookeeper-hosts"))
+    .read
+    .fromBytesWritable('key)
+    .toTypedPipe[String]('key)
+
+  val existingKeys : TypedPipe[(String,Boolean)] = MissingColumnDumpJob.getHBaseColSource(
+    args("hbase-table"),
+    args("zookeeper-hosts"),
+    args("column"))
+    .read
+    .fromBytesWritable('key)
+    .toTypedPipe[String]('key)
+    .map{ key => (key, true) }
+
+  val missingKeys : TypedPipe[String] = allKeys
+    .groupBy( identity )
+    .leftJoin(existingKeys.groupBy(_._1))
+    .toTypedPipe
+    .collect { case (key, (_, None)) => key }
+
+  missingKeys
+    .write(TypedTsv[String](output))
+
+}
+
+object MissingColumnDumpJob {
+
+  // eg, "wbgrp-journal-extract-0-qa", "mtrcs-zk1.us.archive.org:2181"
+  def getHBaseColSource(hbaseTable: String, zookeeperHosts: String, col: String) : HBaseSource = {
+    HBaseBuilder.build(
+      hbaseTable,
+      zookeeperHosts,
+      List(col),
+      SourceMode.SCAN_ALL)
+  }
+
+  def getHBaseKeySource(hbaseTable: String, zookeeperHosts: String) : HBaseSource = {
+    HBaseBuilder.build(
+      hbaseTable,
+      zookeeperHosts,
+      List("f:c"),
+      SourceMode.SCAN_ALL)
+  }
+}
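
The heart of the job is the left join: every known row key is grouped against the keys that already have the requested column, and only the keys that come back with no match are kept. A minimal sketch of the same pattern on in-memory pipes (not part of this commit; the job name and key values are purely illustrative):

package sandcrawler

import com.twitter.scalding._

// Toy job demonstrating the "missing key" left-join pattern used by
// MissingColumnDumpJob, but on hard-coded data instead of HBase SCANs.
class MissingKeyJoinDemoJob(args: Args) extends Job(args) {

  // All row keys (in the real job, every key in the HBase table)
  val allKeys: TypedPipe[String] =
    TypedPipe.from(List("sha1:AAAA", "sha1:BBBB", "sha1:CCCC"))

  // Keys which already have the column populated
  val existingKeys: TypedPipe[(String, Boolean)] =
    TypedPipe.from(List("sha1:BBBB")).map { key => (key, true) }

  // Left join: keys with no match on the right come back as None,
  // which is exactly the "column missing" set we want to dump.
  val missingKeys: TypedPipe[String] = allKeys
    .groupBy(identity)
    .leftJoin(existingKeys.groupBy(_._1))
    .toTypedPipe
    .collect { case (key, (_, None)) => key }

  // Writes "sha1:AAAA" and "sha1:CCCC"
  missingKeys.write(TypedTsv[String](args("output")))
}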