author     bnewbold <bnewbold@archive.org>  2018-06-17 16:51:06 +0000
committer  bnewbold <bnewbold@archive.org>  2018-06-17 16:51:06 +0000
commit     8cc18540e2663ab02428c15c2e1f17fe5624e1aa (patch)
tree       a670cace933b903d3fa99daf6cc407d924aaa6ff /scalding/src/main/scala
parent     7bfb1ef88eda039ef0280f9f4398bd2cd5239555 (diff)
parent     3e9b927ba668f13f818d20962a6fb6f5783407df (diff)
Merge branch 'spertus-abstract' into 'master'
Refactoring to enable easy creation of groupby jobs. See merge request webgroup/sandcrawler!6
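
After this refactor, adding a new per-column count job is a one-line subclass of the new HBaseCountJob. An illustrative sketch (not part of this commit; HBaseSizeCountJob is hypothetical, and assumes the "file:size" column that HBaseBuilder's own comments use as an example):

    package sandcrawler

    import com.twitter.scalding.Args

    // Hypothetical: counts rows per distinct file:size value, analogous to
    // the HBaseMimeCountJob and HBaseStatusCountJob rewritten in this commit.
    class HBaseSizeCountJob(args: Args) extends HBaseCountJob(args, "file:size")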
Diffstat (limited to 'scalding/src/main/scala')
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseBuilder.scala         | 31
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseCountJob.scala        | 30
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala    | 26
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala  | 27
4 files changed, 52 insertions, 62 deletions
diff --git a/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala b/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
index 87cc0cb..85766d6 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseBuilder.scala
@@ -13,20 +13,25 @@ object HBaseBuilder {
"match0" -> List("status", "doi", "info"))
val inverseSchema = {for ((k,vs) <- schema; v <-vs) yield (k + ":" + v)}.toList
+ // The argument should be of the form family:column, such as "file:size".
+ @throws(classOf[IllegalArgumentException])
+ def parseColSpec(colSpec: String) {
+ if (!(inverseSchema contains colSpec)) {
+ throw new IllegalArgumentException("No such column: " + colSpec)
+ }
+ val pair = colSpec split(":")
+ if (pair.length != 2) {
+ throw new IllegalArgumentException("Bad column specifier " + colSpec +
+ " (specifiers should be family:name)")
+ }
+ (pair(0), pair(1))
+ }
+
// The argument should be a comma-separated list of family:column, such as "f:c, file:size".
@throws(classOf[IllegalArgumentException])
- def parseColSpec(colSpecs: List[String]) : (List[String], List[Fields]) = {
+ def parseColSpecs(colSpecs: List[String]) : (List[String], List[Fields]) = {
// Verify that all column specifiers are legal.
- for (colSpec <- colSpecs) {
- if (!(inverseSchema contains colSpec)) {
- throw new IllegalArgumentException("No such column: " + colSpec)
- }
- val pair = colSpec split(":")
- if (colSpec.split(":").length != 2) {
- throw new IllegalArgumentException("Bad column specifier " + colSpec +
- " (specifiers should be family:name)")
- }
- }
+ for (colSpec <- colSpecs) parseColSpec(colSpec)
// Produce and return a tuple containing:
// 1. A list of column families.
@@ -39,8 +44,8 @@ object HBaseBuilder {
(families, groupedColNames.map({fields => new Fields(fields : _*)}))
}
- def build(table: String, server: String, colSpec: List[String], sourceMode: SourceMode, keyList: List[String] = List("key")) = {
- val (families, fields) = parseColSpec(colSpec)
+ def build(table: String, server: String, colSpecs: List[String], sourceMode: SourceMode, keyList: List[String] = List("key")) = {
+ val (families, fields) = parseColSpecs(colSpecs)
new HBaseSource(table, server, new Fields("key"), families, fields, sourceMode = sourceMode, keyList = keyList)
}
}
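
To sketch how these helpers fit together (illustrative usage, not from the commit): parseColSpec validates a single family:column spec against the schema, throwing IllegalArgumentException on anything unknown or malformed, while parseColSpecs validates a whole list and returns the (families, fields) pair that build hands to HBaseSource:

    // Throws IllegalArgumentException("No such column: bogus:column").
    HBaseBuilder.parseColSpec("bogus:column")

    // Valid spec: passes silently (the parsed (family, column) pair is
    // currently discarded, since parseColSpec uses Unit procedure syntax).
    HBaseBuilder.parseColSpec("file:mime")

    // Roughly: families == List("file"), with one Fields group for its columns.
    val (families, fields) = HBaseBuilder.parseColSpecs(List("file:mime", "file:size"))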
diff --git a/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala
new file mode 100644
index 0000000..815478b
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/HBaseCountJob.scala
@@ -0,0 +1,30 @@
+package sandcrawler
+
+import cascading.property.AppProps
+import cascading.tuple.Fields
+import com.twitter.scalding._
+import java.util.Properties
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+class HBaseCountJob(args: Args, colSpec: String) extends JobBase(args) with HBasePipeConversions {
+ val output = args("output")
+ HBaseBuilder.parseColSpec(colSpec)
+ val Col: String = colSpec.split(":")(1)
+
+ HBaseCountJob.getHBaseSource(colSpec)
+ .read
+ .fromBytesWritable(Symbol(Col))
+ .debug
+ .groupBy(Col){group => group.size('count)}
+ .write(Tsv(output))
+}
+
+object HBaseCountJob {
+ def getHBaseSource(colSpec: String) = HBaseBuilder.build(
+ "wbgrp-journal-extract-0-qa", // HBase Table Name
+ "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
+ List(colSpec),
+ SourceMode.SCAN_ALL)
+}
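
For context, a concrete subclass such as HBaseMimeCountJob (next file) would be launched through Scalding's standard com.twitter.scalding.Tool runner, supplying the --output argument that HBaseCountJob requires. A hypothetical invocation (the jar name and output path are assumptions):

    hadoop jar sandcrawler-assembly.jar com.twitter.scalding.Tool sandcrawler.HBaseMimeCountJob --hdfs --output mime_counts.tsv

The job writes one TSV row per distinct column value with its count.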
diff --git a/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
index 819a652..250a12d 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
@@ -1,28 +1,6 @@
package sandcrawler
-import cascading.property.AppProps
-import cascading.tuple.Fields
-import com.twitter.scalding._
-import java.util.Properties
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import com.twitter.scalding.Args
-class HBaseMimeCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
- val output = args("output")
+class HBaseMimeCountJob(args: Args) extends HBaseCountJob(args, "file:mime") {}
- HBaseMimeCountJob.getHBaseSource
- .read
- .fromBytesWritable(List('mime))
- .debug
- .groupBy('mime){group => group.size('count)}
- .write(Tsv(output))
-}
-
-object HBaseMimeCountJob {
- def getHBaseSource = HBaseBuilder.build(
- "wbgrp-journal-extract-0-qa", // HBase Table Name
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- List("file:mime"),
- SourceMode.SCAN_ALL)
-}
diff --git a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
index 0675efc..27b3177 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
@@ -1,28 +1,5 @@
package sandcrawler
-import cascading.property.AppProps
-import cascading.tuple.Fields
-import com.twitter.scalding._
-import java.util.Properties
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import com.twitter.scalding.Args
-class HBaseStatusCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
- val output = args("output")
-
- HBaseStatusCountJob.getHBaseSource
- .read
- .fromBytesWritable(List('status))
-// .debug
- .groupBy('status){group => group.size('count)}
- .write(Tsv(output))
-}
-
-object HBaseStatusCountJob {
- def getHBaseSource = HBaseBuilder.build(
- "wbgrp-journal-extract-0-qa", // HBase Table Name
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- List("grobid0:status"),
- SourceMode.SCAN_ALL)
-}
+class HBaseStatusCountJob(args: Args) extends HBaseCountJob(args, "grobid0:status")