package sandcrawler
import java.util.Properties
import cascading.property.AppProps
import cascading.tuple.Fields
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
import parallelai.spyglass.base.JobBase
import parallelai.spyglass.hbase.HBaseConstants.SourceMode
import parallelai.spyglass.hbase.HBasePipeConversions
import parallelai.spyglass.hbase.HBaseSource
// This nasty, no-good, horrible Job outputs a list of keys ("sha1:A234...")
// for which the given "column" does not have a value set.
// It does this using a self-join because SpyGlass's HBase SCAN support seems
// to be extremely limited.
// Scalding job: writes one row key per line for every HBase row that lacks a
// value in the column named by --column. Required args: --output,
// --hbase-table, --zookeeper-hosts, --column.
class MissingColumnDumpJob(args: Args) extends JobBase(args) with HBasePipeConversions {

// Destination path (--output) for the single-column TSV of missing keys.
val output = args("output")

// Every row key in the table, read by scanning the "f:c" column.
// NOTE(review): this assumes "f:c" is populated for all rows -- a row without
// "f:c" would not be scanned at all and so could never be reported missing;
// confirm against the table schema.
val allKeys : TypedPipe[String] = MissingColumnDumpJob.getHBaseKeySource(
args("hbase-table"),
args("zookeeper-hosts"))
.read
.fromBytesWritable('key)
.toTypedPipe[String]('key)

// Row keys for which the target column DOES have a value, paired with a
// dummy `true` so the pipe has a value side for the leftJoin below.
val existingKeys : TypedPipe[(String,Boolean)] = MissingColumnDumpJob.getHBaseColSource(
args("hbase-table"),
args("zookeeper-hosts"),
args("column"))
.read
.fromBytesWritable('key)
.toTypedPipe[String]('key)
.map{ key => (key, true) }

// Left-join all keys against the keys that have the column; after the join
// each element is (key, (key, Option[(key, true)])), and a None on the right
// means no row with that key had the column set.
val missingKeys : TypedPipe[String] = allKeys
.groupBy( identity )
.leftJoin(existingKeys.groupBy(_._1))
.toTypedPipe
.collect { case (key, (_, None)) => key }

// Emit the missing keys, one per line (eg "sha1:A234...").
missingKeys
.write(TypedTsv[String](output))
}
object MissingColumnDumpJob {

/** Full-table-scan source over a single caller-specified column of the given
 *  table (eg, table "wbgrp-journal-extract-0-qa", zookeeper hosts
 *  "mtrcs-zk1.us.archive.org:2181").
 */
def getHBaseColSource(hbaseTable: String, zookeeperHosts: String, col: String) : HBaseSource =
  HBaseBuilder.build(hbaseTable, zookeeperHosts, List(col), SourceMode.SCAN_ALL)

/** Source used to enumerate all row keys: a full-table scan reading the
 *  "f:c" column.
 */
def getHBaseKeySource(hbaseTable: String, zookeeperHosts: String) : HBaseSource =
  getHBaseColSource(hbaseTable, zookeeperHosts, "f:c")
}