author    | bnewbold <bnewbold@archive.org> | 2018-06-07 20:02:42 +0000
committer | bnewbold <bnewbold@archive.org> | 2018-06-07 20:02:42 +0000
commit    | 410c48faf2099de74292e8583fcd2524d6fd1b7c (patch)
tree      | 9684b67ee0b2ac8e7ca3a047fc8368477faf19f5 /scalding
parent    | 625ef34f957f7f5fdad99c6ce9d84cf7891fbdef (diff)
parent    | 6eca6290aa3fc829f4767023ae075350a0a78192 (diff)
download  | sandcrawler-410c48faf2099de74292e8583fcd2524d6fd1b7c.tar.gz, sandcrawler-410c48faf2099de74292e8583fcd2524d6fd1b7c.zip
Merge branch 'groupby' into 'master'
Added HBaseMimeCount{Job,Test}, which counts the number of rows with each mimetype, as an example of groupby
See merge request webgroup/sandcrawler!4
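
For readers unfamiliar with the pattern being demonstrated, the core of both new jobs is Scalding's fields-based `groupBy` with a `size` aggregation. A minimal sketch of the same pattern over a plain TSV source (the class name and paths here are illustrative only, not part of this merge):

```scala
package sandcrawler

import com.twitter.scalding._

// Hypothetical example, not part of this commit: count rows per mimetype
// from a two-column TSV instead of an HBase table.
class TsvMimeCountJob(args: Args) extends Job(args) {
  Tsv(args("input"), ('sha1, 'mime))
    .read
    // One group per distinct 'mime value; size('count) emits ('mime, 'count) pairs.
    .groupBy('mime) { group => group.size('count) }
    .write(Tsv(args("output")))
}
```

The jobs in this merge do the same thing, except that rows come from a SpyGlass `HBaseSource` (built via `HBaseBuilder.build`) and the raw column bytes are first converted with `fromBytesWritable`.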
Diffstat (limited to 'scalding')
4 files changed, 178 insertions, 0 deletions
diff --git a/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
new file mode 100644
index 0000000..819a652
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/HBaseMimeCountJob.scala
@@ -0,0 +1,28 @@
+package sandcrawler
+
+import cascading.property.AppProps
+import cascading.tuple.Fields
+import com.twitter.scalding._
+import java.util.Properties
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+class HBaseMimeCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+  val output = args("output")
+
+  HBaseMimeCountJob.getHBaseSource
+    .read
+    .fromBytesWritable(List('mime))
+    .debug
+    .groupBy('mime){group => group.size('count)}
+    .write(Tsv(output))
+}
+
+object HBaseMimeCountJob {
+  def getHBaseSource = HBaseBuilder.build(
+    "wbgrp-journal-extract-0-qa",    // HBase Table Name
+    "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
+    List("file:mime"),
+    SourceMode.SCAN_ALL)
+}
diff --git a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
new file mode 100644
index 0000000..0675efc
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
@@ -0,0 +1,28 @@
+package sandcrawler
+
+import cascading.property.AppProps
+import cascading.tuple.Fields
+import com.twitter.scalding._
+import java.util.Properties
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+class HBaseStatusCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+  val output = args("output")
+
+  HBaseStatusCountJob.getHBaseSource
+    .read
+    .fromBytesWritable(List('status))
+// .debug
+    .groupBy('status){group => group.size('count)}
+    .write(Tsv(output))
+}
+
+object HBaseStatusCountJob {
+  def getHBaseSource = HBaseBuilder.build(
+    "wbgrp-journal-extract-0-qa",    // HBase Table Name
+    "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
+ List("grobid0:status"), + SourceMode.SCAN_ALL) +} diff --git a/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala b/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala new file mode 100644 index 0000000..eb6f4ff --- /dev/null +++ b/scalding/src/test/scala/sandcrawler/HBaseMimeCountTest.scala @@ -0,0 +1,61 @@ +package sandcrawler + +import cascading.tuple.{Tuple, Fields} +import com.twitter.scalding.{JobTest, Tsv, TupleConversions} +import org.apache.hadoop.hbase.io.ImmutableBytesWritable +import org.apache.hadoop.hbase.util.Bytes +import org.junit.runner.RunWith +import org.scalatest.FunSpec +import org.scalatest.junit.JUnitRunner +import org.slf4j.LoggerFactory +import parallelai.spyglass.hbase.HBaseSource +import parallelai.spyglass.hbase.HBaseConstants.SourceMode +import scala._ + +@RunWith(classOf[JUnitRunner]) +class HBaseMimeCountTest extends FunSpec with TupleConversions { + + val output = "/tmp/testOutput" + + val log = LoggerFactory.getLogger(this.getClass.getName) + + val mimeType1 = "text/html" + val mimeType2 = "application/pdf" + + val sampleData = List( + List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", mimeType1), + List("sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU", mimeType1), + List("sha1:SDKUVHC3YNNEGH5WAG5ZAAXWAEBNX4WT", mimeType2), + List("sha1:35985C3YNNEGH5WAG5ZAAXWAEBNXJW56", mimeType2), + List("sha1:885C3YNNEGH5WAG5ZAAXWA8BNXJWT6CZ", mimeType2), + List("sha1:00904C3YNNEGH5WAG5ZA9XWAEBNXJWT6", mimeType2), + List("sha1:249C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ", mimeType1), + List("sha1:095893C3YNNEGH5WAG5ZAAXWAEBNXJWT", mimeType2) + ) + + val mimeType1Count = sampleData.count(lst => lst(1) == mimeType1) + val mimeType2Count = sampleData.count(lst => lst(1) == mimeType2) + + JobTest("sandcrawler.HBaseMimeCountJob") + .arg("test", "") + .arg("app.conf.path", "app.conf") + .arg("output", output) + .arg("debug", "true") + .source[Tuple](HBaseMimeCountJob.getHBaseSource, + sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*))) + .sink[Tuple](Tsv(output)) { + outputBuffer => + it("should return a 2-element list.") { + assert(outputBuffer.size === 2) + } + + // Convert List[Tuple] to Map[String, Integer]. 
+        val counts = outputBuffer.map(t => (t.getString(0), t.getInteger(1))).toMap
+        it("should have the appropriate number of each mime type") {
+          assert(counts(mimeType1) == mimeType1Count)
+          assert(counts(mimeType2) == mimeType2Count)
+        }
+    }
+    .run
+    .finish
+}
diff --git a/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala b/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala
new file mode 100644
index 0000000..8b5c3d6
--- /dev/null
+++ b/scalding/src/test/scala/sandcrawler/HBaseStatusCountTest.scala
@@ -0,0 +1,61 @@
+package sandcrawler
+
+import cascading.tuple.{Tuple, Fields}
+import com.twitter.scalding.{JobTest, Tsv, TupleConversions}
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.hbase.util.Bytes
+import org.junit.runner.RunWith
+import org.scalatest.FunSpec
+import org.scalatest.junit.JUnitRunner
+import org.slf4j.LoggerFactory
+import parallelai.spyglass.hbase.HBaseSource
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import scala._
+
+@RunWith(classOf[JUnitRunner])
+class HBaseStatusCountTest extends FunSpec with TupleConversions {
+
+  val output = "/tmp/testOutput"
+
+  val log = LoggerFactory.getLogger(this.getClass.getName)
+
+  val statusType1 = "200"
+  val statusType2 = "404"
+
+  val sampleData = List(
+    List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", statusType1),
+    List("sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU", statusType1),
+    List("sha1:SDKUVHC3YNNEGH5WAG5ZAAXWAEBNX4WT", statusType2),
+    List("sha1:35985C3YNNEGH5WAG5ZAAXWAEBNXJW56", statusType2),
+    List("sha1:885C3YNNEGH5WAG5ZAAXWA8BNXJWT6CZ", statusType2),
+    List("sha1:00904C3YNNEGH5WAG5ZA9XWAEBNXJWT6", statusType2),
+    List("sha1:249C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ", statusType1),
+    List("sha1:095893C3YNNEGH5WAG5ZAAXWAEBNXJWT", statusType2)
+  )
+
+  val statusType1Count = sampleData.count(lst => lst(1) == statusType1)
+  val statusType2Count = sampleData.count(lst => lst(1) == statusType2)
+
+  JobTest("sandcrawler.HBaseStatusCountJob")
+    .arg("test", "")
+    .arg("app.conf.path", "app.conf")
+    .arg("output", output)
+    .arg("debug", "true")
+    .source[Tuple](HBaseStatusCountJob.getHBaseSource,
+      sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
+    .sink[Tuple](Tsv(output)) {
+      outputBuffer =>
+        it("should return a 2-element list.") {
+          assert(outputBuffer.size === 2)
+        }
+
+        // Convert List[Tuple] to Map[String, Integer].
+        val counts = outputBuffer.map(t => (t.getString(0), t.getInteger(1))).toMap
+        it("should have the appropriate number of each status type") {
+          assert(counts(statusType1) == statusType1Count)
+          assert(counts(statusType2) == statusType2Count)
+        }
+    }
+    .run
+    .finish
+}
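
How the new jobs get launched is not part of this merge. Assuming the usual Scalding deployment pattern, one plausible launcher would go through Hadoop's `ToolRunner` and Scalding's `Tool`, passing the job class name plus the `--output` argument the job reads; the output path and the runtime use of `--app.conf.path` are assumptions based on the test arguments above, not documented behavior:

```scala
import com.twitter.scalding.Tool
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.util.ToolRunner

// Hypothetical launcher sketch, not part of this commit.
object RunHBaseMimeCount {
  def main(argv: Array[String]): Unit = {
    val status = ToolRunner.run(new Configuration, new Tool, Array(
      "sandcrawler.HBaseMimeCountJob",  // Scalding job class to run
      "--hdfs",                         // run on the cluster; "--local" for an in-process run
      "--app.conf.path", "app.conf",    // SpyGlass/JobBase config, mirroring the test args
      "--output", "mime_counts.tsv"))   // destination for the (mime, count) TSV
    System.exit(status)
  }
}
```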