author      bnewbold <bnewbold@archive.org>  2018-07-17 20:59:44 +0000
committer   bnewbold <bnewbold@archive.org>  2018-07-17 20:59:44 +0000
commit      8b84595e0ddaf5fe1c3b21e36191c0689de73294 (patch)
tree        0537864c01cc75bd62d23b9d64389ea060592693 /scalding/src/main
parent      746870a10215549c25a16529eabaeb199a3b9228 (diff)
parent      d0ed197859dfcadf89f5321939bb5e83e1bee9ed (diff)
download    sandcrawler-8b84595e0ddaf5fe1c3b21e36191c0689de73294.tar.gz
            sandcrawler-8b84595e0ddaf5fe1c3b21e36191c0689de73294.zip
Merge branch 'both-status-code' into 'master'
refactor HBaseStatusCountJob to convert Long column

See merge request webgroup/sandcrawler!9
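The "convert Long column" part of the refactor decodes the raw HBase cell bytes of grobid0:status_code into a Scala Long with HBase's Bytes utility. A minimal sketch of that conversion, for illustration only (StatusCodeDecodeSketch is hypothetical and not part of this commit):

import org.apache.hadoop.hbase.util.Bytes

object StatusCodeDecodeSketch {
  def main(args: Array[String]): Unit = {
    // HBase stores a Long as its 8-byte big-endian encoding.
    val raw: Array[Byte] = Bytes.toBytes(200L)

    // Bytes.toLong reverses that encoding; the job applies the same call
    // to the copied bytes of the grobid0:status_code column value.
    val code: Long = Bytes.toLong(raw)
    println(code) // prints 200
  }
}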
Diffstat (limited to 'scalding/src/main')
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala | 25
1 file changed, 24 insertions, 1 deletion
diff --git a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
index aabf9f8..dbd444d 100644
--- a/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
+++ b/scalding/src/main/scala/sandcrawler/HBaseStatusCountJob.scala
@@ -1,5 +1,28 @@
 package sandcrawler
+
 import com.twitter.scalding.Args
+import com.twitter.scalding._
+import com.twitter.scalding.typed.TDsl._
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.hbase.util.Bytes
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.HBasePipeConversions
+
+
+class HBaseStatusCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+
+  val source = HBaseCountJob.getHBaseSource(args("hbase-table"),
+                                            args("zookeeper-hosts"),
+                                            "grobid0:status_code")
+
+  val statusPipe : TypedPipe[Long] = source
+    .read
+    .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable)]('key, 'status_code)
+    .map { case (key, raw_code) => Bytes.toLong(raw_code.copyBytes()) }
 
-class HBaseStatusCountJob(args: Args) extends HBaseCountJob(args, "grobid0:status_code")
+  statusPipe.groupBy { identity }
+    .size
+    .debug
+    .write(TypedTsv[(Long,Long)](args("output")))
+}
 
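For context, the counting step at the end of the new job is plain typed Scalding: group the TypedPipe[Long] of status codes by its own values and take each group's size, yielding (status_code, count) rows. A minimal sketch of the same pattern under the assumption of a TSV input rather than HBase (TsvStatusCountJob and its --input/--output arguments are hypothetical, not part of this commit):

package sandcrawler

import com.twitter.scalding._

// Hypothetical illustration: the same count-by-value pattern as
// HBaseStatusCountJob, but reading status codes from a TSV file
// instead of an HBase column.
class TsvStatusCountJob(args: Args) extends Job(args) {

  // One Long status code per input line.
  val statusPipe: TypedPipe[Long] =
    TypedPipe.from(TypedTsv[Long](args("input")))

  // Group identical codes and count each group, producing
  // (status_code, count) rows, as the HBase-backed job above does.
  statusPipe
    .groupBy(identity)
    .size
    .toTypedPipe
    .write(TypedTsv[(Long, Long)](args("output")))
}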