author    bnewbold <bnewbold@archive.org>  2018-06-17 16:51:06 +0000
committer bnewbold <bnewbold@archive.org>  2018-06-17 16:51:06 +0000
commit    8cc18540e2663ab02428c15c2e1f17fe5624e1aa (patch)
tree      a670cace933b903d3fa99daf6cc407d924aaa6ff
parent    7bfb1ef88eda039ef0280f9f4398bd2cd5239555 (diff)
parent    3e9b927ba668f13f818d20962a6fb6f5783407df (diff)
Merge branch 'spertus-abstract' into 'master'
Refactoring to enable easy creation of groupby jobs.

See merge request webgroup/sandcrawler!6
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseBuilder.scala         | 31
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseCountJob.scala        | 30
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala    | 26
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala  | 27
-rw-r--r--  scalding/src/test/scala/sandcrawler/HBaseBuilderTest.scala     | 12
-rw-r--r--  scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala   |  2
-rw-r--r--  scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala |  2
7 files changed, 60 insertions(+), 70 deletions(-)
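The point of this refactor, sketched minimally: with the shared pipeline hoisted into the new HBaseCountJob, a groupby/count job over any legal column reduces to a one-line subclass. The HBaseSizeCountJob name below is hypothetical (not part of this merge); "file:size" is a legal column per the HBaseBuilder schema exercised in the tests.

    package sandcrawler

    import com.twitter.scalding.Args

    // Hypothetical follow-on job, same shape as HBaseMimeCountJob in this
    // merge: count rows grouped by the "file:size" column.
    class HBaseSizeCountJob(args: Args) extends HBaseCountJob(args, "file:size")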
diff --git a/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala b/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
index 87cc0cb..85766d6 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
@@ -13,20 +13,25 @@ object HBaseBuilder {
"match0" -> List("status", "doi", "info"))
val inverseSchema = {for ((k,vs) <- schema; v <-vs) yield (k + ":" + v)}.toList
+ // The argument should be of the form family:column, such as "file:size".
+ @throws(classOf[IllegalArgumentException])
+ def parseColSpec(colSpec: String) {
+ if (!(inverseSchema contains colSpec)) {
+ throw new IllegalArgumentException("No such column: " + colSpec)
+ }
+ val pair = colSpec split(":")
+ if (pair.length != 2) {
+ throw new IllegalArgumentException("Bad column specifier " + colSpec +
+ " (specifiers should be family:name)")
+ }
+ (pair(0), pair(1))
+ }
+
// The argument should be a comma-separated list of family:column, such as "f:c, file:size".
@throws(classOf[IllegalArgumentException])
- def parseColSpec(colSpecs: List[String]) : (List[String], List[Fields]) = {
+ def parseColSpecs(colSpecs: List[String]) : (List[String], List[Fields]) = {
// Verify that all column specifiers are legal.
- for (colSpec <- colSpecs) {
- if (!(inverseSchema contains colSpec)) {
- throw new IllegalArgumentException("No such column: " + colSpec)
- }
- val pair = colSpec split(":")
- if (colSpec.split(":").length != 2) {
- throw new IllegalArgumentException("Bad column specifier " + colSpec +
- " (specifiers should be family:name)")
- }
- }
+ for (colSpec <- colSpecs) parseColSpec(colSpec)
// Produce and return a tuple containing:
// 1. A list of column families.
@@ -39,8 +44,8 @@ object HBaseBuilder {
(families, groupedColNames.map({fields => new Fields(fields : _*)}))
}
- def build(table: String, server: String, colSpec: List[String], sourceMode: SourceMode, keyList: List[String] = List("key")) = {
- val (families, fields) = parseColSpec(colSpec)
+ def build(table: String, server: String, colSpecs: List[String], sourceMode: SourceMode, keyList: List[String] = List("key")) = {
+ val (families, fields) = parseColSpecs(colSpecs)
new HBaseSource(table, server, new Fields("key"), families, fields, sourceMode = sourceMode, keyList = keyList)
}
}
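A rough sketch of the split API, with expected behavior inferred from HBaseBuilderTest further down: parseColSpec validates a single "family:column" specifier and throws IllegalArgumentException on bad input, while parseColSpecs validates a whole list and groups it into column families plus one cascading Fields per family.

    // Single specifier: validation only.
    HBaseBuilder.parseColSpec("file:size")      // legal, returns quietly
    // HBaseBuilder.parseColSpec("file_size")  // throws IllegalArgumentException

    // List of specifiers: validate each, then group column names by family.
    val (fams, fields) = HBaseBuilder.parseColSpecs(
      List("file:size", "file:cdx", "match0:status"))
    // fams   == two families, "file" and "match0"
    // fields == one cascading Fields per family, e.g. ("size", "cdx") for "file"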
diff --git a/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala
new file mode 100644
index 0000000..815478b
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala
@@ -0,0 +1,30 @@
+package sandcrawler
+
+import cascading.property.AppProps
+import cascading.tuple.Fields
+import com.twitter.scalding._
+import java.util.Properties
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+class HBaseCountJob(args: Args, colSpec: String) extends JobBase(args) with HBasePipeConversions {
+ val output = args("output")
+ HBaseBuilder.parseColSpec(colSpec)
+ val Col: String = colSpec.split(":")(1)
+
+ HBaseCountJob.getHBaseSource(colSpec)
+ .read
+ .fromBytesWritable(Symbol(Col))
+ .debug
+ .groupBy(Col){group => group.size('count)}
+ .write(Tsv(output))
+}
+
+object HBaseCountJob {
+ def getHBaseSource(colSpec: String) = HBaseBuilder.build(
+ "wbgrp-journal-extract-0-qa", // HBase Table Name
+ "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
+ List(colSpec),
+ SourceMode.SCAN_ALL)
+}
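For context, a hedged sketch of how a job built on HBaseCountJob would typically be launched with scalding's standard com.twitter.scalding.Tool runner. The jar name and output path are hypothetical; --output is the argument the job reads via args("output"), and --app.conf.path mirrors the argument the tests below pass in.

    hadoop jar sandcrawler-assembly.jar com.twitter.scalding.Tool \
      sandcrawler.HBaseMimeCountJob --hdfs \
      --app.conf.path app.conf \
      --output hdfs:///user/example/mime-counts.tsv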
diff --git a/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
index 819a652..250a12d 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
@@ -1,28 +1,6 @@
package sandcrawler
-import cascading.property.AppProps
-import cascading.tuple.Fields
-import com.twitter.scalding._
-import java.util.Properties
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import com.twitter.scalding.Args
-class HBaseMimeCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
- val output = args("output")
+class HBaseMimeCountJob(args: Args) extends HBaseCountJob(args, "file:mime") {}
- HBaseMimeCountJob.getHBaseSource
- .read
- .fromBytesWritable(List('mime))
- .debug
- .groupBy('mime){group => group.size('count)}
- .write(Tsv(output))
-}
-
-object HBaseMimeCountJob {
- def getHBaseSource = HBaseBuilder.build(
- "wbgrp-journal-extract-0-qa", // HBase Table Name
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- List("file:mime"),
- SourceMode.SCAN_ALL)
-}
diff --git a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
index 0675efc..27b3177 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
@@ -1,28 +1,5 @@
package sandcrawler
-import cascading.property.AppProps
-import cascading.tuple.Fields
-import com.twitter.scalding._
-import java.util.Properties
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import com.twitter.scalding.Args
-class HBaseStatusCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
- val output = args("output")
-
- HBaseStatusCountJob.getHBaseSource
- .read
- .fromBytesWritable(List('status))
-// .debug
- .groupBy('status){group => group.size('count)}
- .write(Tsv(output))
-}
-
-object HBaseStatusCountJob {
- def getHBaseSource = HBaseBuilder.build(
- "wbgrp-journal-extract-0-qa", // HBase Table Name
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- List("grobid0:status"),
- SourceMode.SCAN_ALL)
-}
+class HBaseStatusCountJob(args: Args) extends HBaseCountJob(args, "grobid0:status")
diff --git a/scalding/src/test/scala/sandcrawler/HBaseBuilderTest.scala b/scalding/src/test/scala/sandcrawler/HBaseBuilderTest.scala
index 4697f56..603a4c7 100644
--- a/scalding/src/test/scala/sandcrawler/HBaseBuilderTest.scala
+++ b/scalding/src/test/scala/sandcrawler/HBaseBuilderTest.scala
@@ -4,8 +4,8 @@ import cascading.tuple.Fields
import org.scalatest._
class HBaseBuilderTest extends FlatSpec with Matchers {
- "parseColSpec()" should "work on legal nontrivial input" in {
- val (fams, fields) = HBaseBuilder.parseColSpec(List("file:size", "file:cdx", "match0:status"))
+ "parseColSpecs()" should "work on legal nontrivial input" in {
+ val (fams, fields) = HBaseBuilder.parseColSpecs(List("file:size", "file:cdx", "match0:status"))
fams should have length 2
fields should have length 2
val fileIndex = fams.indexOf("file")
@@ -17,26 +17,26 @@ class HBaseBuilderTest extends FlatSpec with Matchers {
}
it should "work on empty input" in {
- val (fams, fields) = HBaseBuilder.parseColSpec(List())
+ val (fams, fields) = HBaseBuilder.parseColSpecs(List())
fams should have length 0
fields should have length 0
}
it should "throw IllegalArgumentException on malformed input" in {
a [IllegalArgumentException] should be thrownBy {
- HBaseBuilder.parseColSpec(List("file_size"))
+ HBaseBuilder.parseColSpecs(List("file_size"))
}
}
it should "throw IllegalArgumentException on nonexistent family" in {
a [IllegalArgumentException] should be thrownBy {
- HBaseBuilder.parseColSpec(List("foo:bar"))
+ HBaseBuilder.parseColSpecs(List("foo:bar"))
}
}
it should "throw IllegalArgumentException on nonexistent column" in {
a [IllegalArgumentException] should be thrownBy {
- HBaseBuilder.parseColSpec(List("file:bar"))
+ HBaseBuilder.parseColSpecs(List("file:bar"))
}
}
}
diff --git a/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala b/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala
index eb6f4ff..cc400c5 100644
--- a/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala
+++ b/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala
@@ -41,7 +41,7 @@ class HBaseMimeCountTest extends FunSpec with TupleConversions {
.arg("app.conf.path", "app.conf")
.arg("output", output)
.arg("debug", "true")
- .source[Tuple](HBaseMimeCountJob.getHBaseSource,
+ .source[Tuple](HBaseCountJob.getHBaseSource("file:mime"),
sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
.sink[Tuple](Tsv(output)) {
outputBuffer =>
diff --git a/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala b/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala
index 8b5c3d6..a0b313e 100644
--- a/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala
+++ b/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala
@@ -41,7 +41,7 @@ class HBaseStatusCountTest extends FunSpec with TupleConversions {
.arg("app.conf.path", "app.conf")
.arg("output", output)
.arg("debug", "true")
- .source[Tuple](HBaseStatusCountJob.getHBaseSource,
+ .source[Tuple](HBaseCountJob.getHBaseSource("grobid0:status"),
sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
.sink[Tuple](Tsv(output)) {
outputBuffer =>