Diffstat (limited to 'scalding')
-rw-r--r--  scalding/.gitignore                                                  3
-rw-r--r--  scalding/README.md                                                  63
-rw-r--r--  scalding/build.sbt                                                  49
-rw-r--r--  scalding/project/Dependencies.scala                                  5
-rw-r--r--  scalding/project/build.properties                                    1
-rw-r--r--  scalding/project/plugins.sbt                                         2
-rw-r--r--  scalding/src/main/scala/example/SimpleHBaseSourceExample.scala      36
-rw-r--r--  scalding/src/main/scala/example/WordCountJob.scala                  12
-rw-r--r--  scalding/src/main/scala/sandcrawler/HBaseRowCountJob.scala          41
-rw-r--r--  scalding/src/test/scala/example/SimpleHBaseSourceExampleTest.scala  58
-rw-r--r--  scalding/src/test/scala/example/WordCountTest.scala                 36
-rw-r--r--  scalding/src/test/scala/sandcrawler/HBaseRowCountTest.scala         59
12 files changed, 365 insertions, 0 deletions
diff --git a/scalding/.gitignore b/scalding/.gitignore
new file mode 100644
index 0000000..7798ee0
--- /dev/null
+++ b/scalding/.gitignore
@@ -0,0 +1,3 @@
+target
+project/project/
+project/target/
diff --git a/scalding/README.md b/scalding/README.md
new file mode 100644
index 0000000..e41e9ec
--- /dev/null
+++ b/scalding/README.md
@@ -0,0 +1,63 @@
+
+following https://medium.com/@gayani.nan/how-to-run-a-scalding-job-567160fa193
+
+
+running on my laptop:
+
+ openjdk version "1.8.0_171"
+ OpenJDK Runtime Environment (build 1.8.0_171-8u171-b11-1~deb9u1-b11)
+ OpenJDK 64-Bit Server VM (build 25.171-b11, mixed mode)
+
+ Scala code runner version 2.11.8 -- Copyright 2002-2016, LAMP/EPFL
+
+ sbt: 1.1.5
+
+ sbt new scala/scala-seed.g8
+
+ # inserted additional deps, tweaked versions
+ # hadoop 2.5.0 seems to conflict with cascading; sticking with 2.6.0
+
+ sbt assembly
+ scp target/scala-2.11/scald-mvp-assembly-0.1.0-SNAPSHOT.jar devbox:
+
+ # on cluster:
+ yarn jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar WordCount --hdfs --input hdfs:///user/bnewbold/dummy.txt
+
+later, using the hadoop command instead:
+
+ hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool example.WordCountJob --hdfs --input hdfs:///user/bnewbold/dummy.txt --output hdfs:///user/bnewbold/test_scalding_out3
+
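+the same job should also run in scalding local mode via sbt (a sketch, untested
+here; assumes the input file exists locally):
+
+    sbt "runMain com.twitter.scalding.Tool example.WordCountJob --local --input dummy.txt --output counts.tsv"
+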
+helpful for debugging dependency woes:
+
+ sbt dependencyTree
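+    # (dependencyTree is provided by the sbt-dependency-graph plugin in project/plugins.sbt)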
+
+testing the spyglass example program (expect a table error):
+
+ hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool example.SimpleHBaseSourceExample --hdfs --output hdfs:///user/bnewbold/spyglass_out_test --app.conf.path thing.conf --debug true
+ # org.apache.hadoop.hbase.TableNotFoundException: table_name
+
+running a spyglass job (currently fails with a NullPointerException):
+
+ hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool sandcrawler.HBaseRowCountJob --hdfs --output hdfs:///user/bnewbold/spyglass_out_test --app.conf.path thing.conf
+
+ # Caused by: java.lang.NullPointerException
+ # at parallelai.spyglass.hbase.HBaseSource.<init>(HBaseSource.scala:48)
+ # at sandcrawler.HBaseRowCountJob.<init>(HBaseRowCountJob.scala:17)
+
+## Custom build
+
+in SpyGlass repo:
+
+ # This builds the new .jar and installs it in the (laptop local) ~/.m2
+ # repository
+ mvn clean install -U
+
+ # Copy that .jar (and associated pom.xml) over to where sbt can find it
+ mkdir -p ~/.sbt/preloaded/parallelai/
+ cp -r ~/.m2/repository/parallelai/parallelai.spyglass ~/.sbt/preloaded/parallelai/
+
+ # then build here
+ sbt assembly
+
+The medium-term plan here is to push the custom SpyGlass jar as a static maven
+repo to an archive.org item, and point build.sbt to that folder.
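+
+a rough sketch of that eventual build.sbt change (the item name and layout here
+are hypothetical):
+
+    resolvers += "SpyGlass Static Repo" at "https://archive.org/download/<item>/repo"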
diff --git a/scalding/build.sbt b/scalding/build.sbt
new file mode 100644
index 0000000..aae8506
--- /dev/null
+++ b/scalding/build.sbt
@@ -0,0 +1,49 @@
+import Dependencies._
+
+val hadoopVersion = "2.5.0-cdh5.3.1" // IA cluster 2018-05-21: 2.5.0-cdh5.3.1
+val hbaseVersion = "0.98.6-cdh5.3.1" // IA cluster 2018-05-21: 0.98.6-cdh5.3.1
+
+lazy val root = (project in file(".")).
+  settings(
+ inThisBuild(List(
+ organization := "org.archive",
+ scalaVersion := "2.11.8",
+ version := "0.1.0-SNAPSHOT",
+ test in assembly := {},
+ )),
+
+ name := "scald-mvp",
+
+ resolvers += "conjars.org" at "http://conjars.org/repo",
+ resolvers += "Apache HBase" at "https://repository.apache.org/content/repositories/releases",
+ resolvers += "Cloudera Maven Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos",
+ resolvers += "Twitter Maven Repository" at "https://maven.twttr.com",
+
+ libraryDependencies += scalaTest % Test,
+ libraryDependencies += "org.scala-lang" % "scala-library" % "2.11.8",
+ libraryDependencies += "com.twitter" % "scalding-core_2.11" % "0.17.2",
+ libraryDependencies += "org.apache.hadoop" % "hadoop-common" % hadoopVersion,
+ libraryDependencies += "org.apache.hadoop" % "hadoop-client" % hadoopVersion,
+ libraryDependencies += "org.apache.hadoop" % "hadoop-mapreduce-client-jobclient" % hadoopVersion classifier "tests",
+ libraryDependencies += "org.apache.hbase" % "hbase-common" % hbaseVersion,
+ libraryDependencies += "parallelai" % "parallelai.spyglass" % "2.11_0.17.2_cdh5.3.1",
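+  // note: the spyglass version string appears to encode the scala, scalding,
+  // and CDH versions; presumably it must be kept in sync with the settings above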
+
+ // cargo-culted from twitter/scalding's build.sbt
+ // hint via https://stackoverflow.com/questions/23280494/sbt-assembly-error-deduplicate-different-file-contents-found-in-the-following#23280952
+ mergeStrategy in assembly := {
+ case s if s.endsWith(".class") => MergeStrategy.last
+ case s if s.endsWith("project.clj") => MergeStrategy.concat
+ case s if s.endsWith(".html") => MergeStrategy.last
+ case s if s.endsWith(".dtd") => MergeStrategy.last
+ case s if s.endsWith(".xsd") => MergeStrategy.last
+ case s if s.endsWith("pom.properties") => MergeStrategy.last
+ case s if s.endsWith("pom.xml") => MergeStrategy.last
+ case s if s.endsWith(".jnilib") => MergeStrategy.rename
+ case s if s.endsWith("jansi.dll") => MergeStrategy.rename
+ case s if s.endsWith("libjansi.so") => MergeStrategy.rename
+ case s if s.endsWith("properties") => MergeStrategy.filterDistinctLines
+ case s if s.endsWith("xml") => MergeStrategy.last
+ case x => (mergeStrategy in assembly).value(x)
+ },
+ )
diff --git a/scalding/project/Dependencies.scala b/scalding/project/Dependencies.scala
new file mode 100644
index 0000000..558929d
--- /dev/null
+++ b/scalding/project/Dependencies.scala
@@ -0,0 +1,5 @@
+import sbt._
+
+object Dependencies {
+ lazy val scalaTest = "org.scalatest" %% "scalatest" % "3.0.5"
+}
diff --git a/scalding/project/build.properties b/scalding/project/build.properties
new file mode 100644
index 0000000..31334bb
--- /dev/null
+++ b/scalding/project/build.properties
@@ -0,0 +1 @@
+sbt.version=1.1.1
diff --git a/scalding/project/plugins.sbt b/scalding/project/plugins.sbt
new file mode 100644
index 0000000..084d4bf
--- /dev/null
+++ b/scalding/project/plugins.sbt
@@ -0,0 +1,2 @@
+addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6")
+addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.0")
diff --git a/scalding/src/main/scala/example/SimpleHBaseSourceExample.scala b/scalding/src/main/scala/example/SimpleHBaseSourceExample.scala
new file mode 100644
index 0000000..fe2a120
--- /dev/null
+++ b/scalding/src/main/scala/example/SimpleHBaseSourceExample.scala
@@ -0,0 +1,36 @@
+package example
+
+import com.twitter.scalding.{Tsv, Args}
+import parallelai.spyglass.base.JobBase
+import org.apache.log4j.{Level, Logger}
+import parallelai.spyglass.hbase.{HBasePipeConversions, HBaseSource}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import cascading.tuple.Fields
+
+/**
+ * Simple example of HBaseSource usage
+ */
+class SimpleHBaseSourceExample(args: Args) extends JobBase(args) with HBasePipeConversions {
+
+ val isDebug: Boolean = args("debug").toBoolean
+
+ if (isDebug) Logger.getRootLogger.setLevel(Level.DEBUG)
+
+ val output = args("output")
+
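+  // read three rows by key from the test table, convert the raw bytes to
+  // strings, and write them out as a TSV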
+ val hbs = new HBaseSource(
+ "table_name",
+ //"quorum_name:2181",
+    "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper quorum, used to fetch runtime config (can be a comma-separated list of hosts)
+ new Fields("key"),
+ List("column_family"),
+ List(new Fields("column_name1", "column_name2")),
+ sourceMode = SourceMode.GET_LIST, keyList = List("1", "2", "3"))
+ .read
+ .debug
+ .fromBytesWritable(new Fields("key", "column_name1", "column_name2"))
+ .write(Tsv(output format "get_list"))
+
+ }
diff --git a/scalding/src/main/scala/example/WordCountJob.scala b/scalding/src/main/scala/example/WordCountJob.scala
new file mode 100644
index 0000000..0e63fed
--- /dev/null
+++ b/scalding/src/main/scala/example/WordCountJob.scala
@@ -0,0 +1,12 @@
+package example
+
+import com.twitter.scalding._
+
+class WordCountJob(args: Args) extends Job(args) {
+ TypedPipe.from(TextLine(args("input")))
+ .flatMap { line => line.split("\\s+") }
+ .map { word => (word, 1L) }
+ .sumByKey
+ // The compiler will enforce the type coming out of the sumByKey is the same as the type we have for our sink
+ .write(TypedTsv[(String, Long)](args("output")))
+}
diff --git a/scalding/src/main/scala/sandcrawler/HBaseRowCountJob.scala b/scalding/src/main/scala/sandcrawler/HBaseRowCountJob.scala
new file mode 100644
index 0000000..5df6b2e
--- /dev/null
+++ b/scalding/src/main/scala/sandcrawler/HBaseRowCountJob.scala
@@ -0,0 +1,41 @@
+package sandcrawler
+
+import com.twitter.scalding._
+import parallelai.spyglass.base.JobBase
+import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions, HBaseConstants}
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import cascading.tuple.Fields
+
+
+class HBaseRowCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
+
+  // For now this doesn't actually count rows, it just dumps them (in the spirit of the word-count example)
+
+ val output = args("output")
+
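+  // fetch two known rows by SHA1 key from the QA table, convert the key
+  // bytes to a string, and write it out as a TSV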
+ val hbs = new HBaseSource(
+ //"table_name",
+ //"quorum_name:2181",
+ "wbgrp-journal-extract-0-qa", // HBase Table Name
+    "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper quorum, used to fetch runtime config (can be a comma-separated list of hosts)
+ new Fields("key"),
+ List("file"),
+ List(new Fields("size", "mimetype")),
+ sourceMode = SourceMode.GET_LIST, keyList = List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU"))
+ .read
+ .debug
+ .fromBytesWritable(new Fields("key"))
+ .write(Tsv(output format "get_list"))
+
+  /* alternative (currently a fragment): scan the full table instead of fetching a key list
+ List("column_family"),
+ sourceMode = SourceMode.SCAN_ALL)
+ .read
+ .debug
+ .fromBytesWritable(new Fields("key"))
+ .write(Tsv(output format "get_list"))
+ */
+}
diff --git a/scalding/src/test/scala/example/SimpleHBaseSourceExampleTest.scala b/scalding/src/test/scala/example/SimpleHBaseSourceExampleTest.scala
new file mode 100644
index 0000000..cf068c1
--- /dev/null
+++ b/scalding/src/test/scala/example/SimpleHBaseSourceExampleTest.scala
@@ -0,0 +1,58 @@
+package example
+
+import org.junit.runner.RunWith
+import com.twitter.scalding.{JobTest, TupleConversions}
+import org.scalatest.FunSpec
+import org.scalatest.junit.JUnitRunner
+import org.slf4j.LoggerFactory
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import cascading.tuple.{Tuple, Fields}
+import org.apache.hadoop.hbase.util.Bytes
+import com.twitter.scalding.Tsv
+import parallelai.spyglass.hbase.HBaseSource
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+/**
+ * Example of how to define tests for HBaseSource
+ */
+@RunWith(classOf[JUnitRunner])
+class SimpleHBaseSourceExampleTest extends FunSpec with TupleConversions {
+
+ val output = "/tmp/testOutput"
+
+ val log = LoggerFactory.getLogger(this.getClass.getName)
+
+ val sampleData = List(
+ List("1", "kk1", "pp1"),
+ List("2", "kk2", "pp2"),
+ List("3", "kk3", "pp3")
+ )
+
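+  // run the job with the HBaseSource stubbed out by sampleData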
+ JobTest("example.SimpleHBaseSourceExample")
+ .arg("test", "")
+ .arg("app.conf.path", "app.conf")
+ .arg("output", output)
+ .arg("debug", "true")
+ .source[Tuple](
+ new HBaseSource(
+ "table_name",
+ "mtrcs-zk1.us.archive.org:2181",
+ new Fields("key"),
+ List("column_family"),
+ List(new Fields("column_name1", "column_name2")),
+ sourceMode = SourceMode.GET_LIST, keyList = List("1", "2", "3")),
+ sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
+ .sink[Tuple](Tsv(output format "get_list")) {
+ outputBuffer =>
+ log.debug("Output => " + outputBuffer)
+
+ it("should return the test data provided.") {
+ println("outputBuffer.size => " + outputBuffer.size)
+ assert(outputBuffer.size === 3)
+ }
+ }
+ .run
+ .finish
+
+}
diff --git a/scalding/src/test/scala/example/WordCountTest.scala b/scalding/src/test/scala/example/WordCountTest.scala
new file mode 100644
index 0000000..c42770f
--- /dev/null
+++ b/scalding/src/test/scala/example/WordCountTest.scala
@@ -0,0 +1,36 @@
+/*
+Copyright 2012 Twitter, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package com.twitter.scalding
+
+import org.scalatest.{ Matchers, WordSpec }
+
+class WordCountTest extends WordSpec with Matchers {
+ "A WordCount job" should {
+ JobTest(new example.WordCountJob(_))
+ .arg("input", "inputFile")
+ .arg("output", "outputFile")
+ .source(TextLine("inputFile"), List((0, "hack hack hack and hack")))
+      .sink[(String, Long)](TypedTsv[(String, Long)]("outputFile")){ outputBuffer =>
+ val outMap = outputBuffer.toMap
+ "count words correctly" in {
+ outMap("hack") shouldBe 4
+ outMap("and") shouldBe 1
+ }
+ }
+ .run
+ .finish()
+ }
+}
diff --git a/scalding/src/test/scala/sandcrawler/HBaseRowCountTest.scala b/scalding/src/test/scala/sandcrawler/HBaseRowCountTest.scala
new file mode 100644
index 0000000..598f45d
--- /dev/null
+++ b/scalding/src/test/scala/sandcrawler/HBaseRowCountTest.scala
@@ -0,0 +1,59 @@
+package sandcrawler
+
+import org.junit.runner.RunWith
+import com.twitter.scalding.{JobTest, TupleConversions}
+import org.scalatest.FunSpec
+import org.scalatest.junit.JUnitRunner
+import org.slf4j.LoggerFactory
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import cascading.tuple.{Tuple, Fields}
+import org.apache.hadoop.hbase.util.Bytes
+import com.twitter.scalding.Tsv
+import parallelai.spyglass.hbase.HBaseSource
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+
+/**
+ * Tests for HBaseRowCountJob, adapted from the SpyGlass HBaseSource example test
+ */
+@RunWith(classOf[JUnitRunner])
+class HBaseRowCountTest extends FunSpec with TupleConversions {
+
+ val output = "/tmp/testOutput"
+
+ val log = LoggerFactory.getLogger(this.getClass.getName)
+
+ val sampleData = List(
+ List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "a", "b"),
+ List("sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU", "a", "b")
+ )
+
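+  // run the job with the HBaseSource input stubbed out by sampleData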
+ JobTest("sandcrawler.HBaseRowCountJob")
+ .arg("test", "")
+ .arg("app.conf.path", "app.conf")
+ .arg("output", output)
+ .arg("debug", "true")
+ .source[Tuple](
+ new HBaseSource(
+ //"table_name",
+ //"quorum_name:2181",
+ "wbgrp-journal-extract-0-qa",
+ "mtrcs-zk1.us.archive.org:2181",
+ new Fields("key"),
+ List("file"),
+ List(new Fields("size", "mimetype")),
+ sourceMode = SourceMode.GET_LIST, keyList = List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU")),
+ sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
+ .sink[Tuple](Tsv(output format "get_list")) {
+ outputBuffer =>
+ log.debug("Output => " + outputBuffer)
+
+ it("should return the test data provided.") {
+ println("outputBuffer.size => " + outputBuffer.size)
+ assert(outputBuffer.size === 2)
+ }
+ }
+ .run
+ .finish
+
+}