Diffstat (limited to 'scald-mvp')
-rw-r--r--  scald-mvp/.gitignore                                                 |  3
-rw-r--r--  scald-mvp/README.md                                                  | 63
-rw-r--r--  scald-mvp/build.sbt                                                  | 49
-rw-r--r--  scald-mvp/project/Dependencies.scala                                 |  5
-rw-r--r--  scald-mvp/project/build.properties                                   |  1
-rw-r--r--  scald-mvp/project/plugins.sbt                                        |  2
-rw-r--r--  scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala      | 36
-rw-r--r--  scald-mvp/src/main/scala/example/WordCountJob.scala                  | 12
-rw-r--r--  scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala          | 41
-rw-r--r--  scald-mvp/src/test/scala/example/SimpleHBaseSourceExampleTest.scala  | 58
-rw-r--r--  scald-mvp/src/test/scala/example/WordCountTest.scala                 | 36
-rw-r--r--  scald-mvp/src/test/scala/sandcrawler/HBaseRowCountTest.scala         | 59
12 files changed, 0 insertions, 365 deletions
diff --git a/scald-mvp/.gitignore b/scald-mvp/.gitignore
deleted file mode 100644
index 7798ee0..0000000
--- a/scald-mvp/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-target
-project/project/
-project/targer/
diff --git a/scald-mvp/README.md b/scald-mvp/README.md
deleted file mode 100644
index e41e9ec..0000000
--- a/scald-mvp/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-
-following https://medium.com/@gayani.nan/how-to-run-a-scalding-job-567160fa193
-
-
-running on my laptop:
-
- openjdk version "1.8.0_171"
- OpenJDK Runtime Environment (build 1.8.0_171-8u171-b11-1~deb9u1-b11)
- OpenJDK 64-Bit Server VM (build 25.171-b11, mixed mode)
-
- Scala code runner version 2.11.8 -- Copyright 2002-2016, LAMP/EPFL
-
- sbt: 1.1.5
-
- sbt new scala/scala-seed.g8
-
- # inserted additional deps, tweaked versions
- # hadoop 2.5.0 seems to conflict with cascading; sticking with 2.6.0
-
- sbt assembly
- scp target/scala-2.11/scald-mvp-assembly-0.1.0-SNAPSHOT.jar devbox:
-
- # on cluster:
- yarn jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar WordCount --hdfs --input hdfs:///user/bnewbold/dummy.txt
-
-later, using the hadoop command instead:
-
- hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool example.WordCountJob --hdfs --input hdfs:///user/bnewbold/dummy.txt --output hdfs:///user/bnewbold/test_scalding_out3
-
-helpful for debugging dependency woes:
-
- sbt dependencyTree
-
-testing the spyglass example program (expect a table error):
-
- hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool example.SimpleHBaseSourceExample --hdfs --output hdfs:///user/bnewbold/spyglass_out_test --app.conf.path thing.conf --debug true
- # org.apache.hadoop.hbase.TableNotFoundException: table_name
-
-running a spyglass job (gives a nullpointer exception):
-
- hadoop jar scald-mvp-assembly-0.1.0-SNAPSHOT.jar com.twitter.scalding.Tool sandcrawler.HBaseRowCountJob --hdfs --output hdfs:///user/bnewbold/spyglass_out_test --app.conf.path thing.conf
-
- # Caused by: java.lang.NullPointerException
- # at parallelai.spyglass.hbase.HBaseSource.<init>(HBaseSource.scala:48)
- # at sandcrawler.HBaseRowCountJob.<init>(HBaseRowCountJob.scala:17)
-
-## Custom build
-
-in SpyGlass repo:
-
- # This builds the new .jar and installs it in the (laptop local) ~/.m2
- # repository
- mvn clean install -U
-
- # Copy that .jar (and associated pom.xml) over to where sbt can find it
- mkdir -p ~/.sbt/preloaded/parallelai/
- cp -r ~/.m2/repository/parallelai/parallelai.spyglass ~/.sbt/preloaded/parallelai/
-
- # then build here
- sbt assembly
-
-The medium-term plan here is to push the custom SpyGlass jar as a static maven
-repo to an archive.org item, and point build.sbt to that folder.
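
The static-repo plan at the end of the README above amounts to adding one resolver in build.sbt and keeping the existing SpyGlass dependency line. A minimal sketch, assuming a hypothetical archive.org item URL (no item exists yet, so the path below is a placeholder):

    // build.sbt sketch -- the archive.org item URL is a placeholder, not a real repo
    resolvers += "SpyGlass static repo" at "https://archive.org/download/EXAMPLE-spyglass-repo/maven2"
    libraryDependencies += "parallelai" % "parallelai.spyglass" % "2.11_0.17.2_cdh5.3.1"

As long as the uploaded directory follows the standard Maven layout (groupId path, pom.xml, jar), sbt resolves it like any other remote repository, and the local ~/.sbt/preloaded copy step goes away.
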
diff --git a/scald-mvp/build.sbt b/scald-mvp/build.sbt
deleted file mode 100644
index aae8506..0000000
--- a/scald-mvp/build.sbt
+++ /dev/null
@@ -1,49 +0,0 @@
-import Dependencies._
-
-val hadoopVersion = "2.5.0-cdh5.3.1" // IA cluster 2018-05-21: 2.5.0-cdh5.3.1
-val hbaseVersion = "0.98.6-cdh5.3.1" // IA cluster 2018-05-21: 0.98.6-cdh5.3.1
-
-lazy val root = (project in file(".")).
-
- settings(
- inThisBuild(List(
- organization := "org.archive",
- scalaVersion := "2.11.8",
- version := "0.1.0-SNAPSHOT",
- test in assembly := {},
- )),
-
- name := "scald-mvp",
-
- resolvers += "conjars.org" at "http://conjars.org/repo",
- resolvers += "Apache HBase" at "https://repository.apache.org/content/repositories/releases",
- resolvers += "Cloudera Maven Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos",
- resolvers += "Twitter Maven Repository" at "https://maven.twttr.com",
-
- libraryDependencies += scalaTest % Test,
- libraryDependencies += "org.scala-lang" % "scala-library" % "2.11.8",
- libraryDependencies += "com.twitter" % "scalding-core_2.11" % "0.17.2",
- libraryDependencies += "org.apache.hadoop" % "hadoop-common" % hadoopVersion,
- libraryDependencies += "org.apache.hadoop" % "hadoop-client" % hadoopVersion,
- libraryDependencies += "org.apache.hadoop" % "hadoop-mapreduce-client-jobclient" % hadoopVersion classifier "tests",
- libraryDependencies += "org.apache.hbase" % "hbase-common" % hbaseVersion,
- libraryDependencies += "parallelai" % "parallelai.spyglass" % "2.11_0.17.2_cdh5.3.1",
-
- // cargo-culted from twitter/scalding's build.sbt
- // hint via https://stackoverflow.com/questions/23280494/sbt-assembly-error-deduplicate-different-file-contents-found-in-the-following#23280952
- mergeStrategy in assembly := {
- case s if s.endsWith(".class") => MergeStrategy.last
- case s if s.endsWith("project.clj") => MergeStrategy.concat
- case s if s.endsWith(".html") => MergeStrategy.last
- case s if s.endsWith(".dtd") => MergeStrategy.last
- case s if s.endsWith(".xsd") => MergeStrategy.last
- case s if s.endsWith("pom.properties") => MergeStrategy.last
- case s if s.endsWith("pom.xml") => MergeStrategy.last
- case s if s.endsWith(".jnilib") => MergeStrategy.rename
- case s if s.endsWith("jansi.dll") => MergeStrategy.rename
- case s if s.endsWith("libjansi.so") => MergeStrategy.rename
- case s if s.endsWith("properties") => MergeStrategy.filterDistinctLines
- case s if s.endsWith("xml") => MergeStrategy.last
- case x => (mergeStrategy in assembly).value(x)
- },
- )
diff --git a/scald-mvp/project/Dependencies.scala b/scald-mvp/project/Dependencies.scala
deleted file mode 100644
index 558929d..0000000
--- a/scald-mvp/project/Dependencies.scala
+++ /dev/null
@@ -1,5 +0,0 @@
-import sbt._
-
-object Dependencies {
- lazy val scalaTest = "org.scalatest" %% "scalatest" % "3.0.5"
-}
diff --git a/scald-mvp/project/build.properties b/scald-mvp/project/build.properties
deleted file mode 100644
index 31334bb..0000000
--- a/scald-mvp/project/build.properties
+++ /dev/null
@@ -1 +0,0 @@
-sbt.version=1.1.1
diff --git a/scald-mvp/project/plugins.sbt b/scald-mvp/project/plugins.sbt
deleted file mode 100644
index 084d4bf..0000000
--- a/scald-mvp/project/plugins.sbt
+++ /dev/null
@@ -1,2 +0,0 @@
-addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.6")
-addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.0")
diff --git a/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala b/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala
deleted file mode 100644
index fe2a120..0000000
--- a/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala
+++ /dev/null
@@ -1,36 +0,0 @@
-package example
-
-import com.twitter.scalding.{Tsv, Args}
-import parallelai.spyglass.base.JobBase
-import org.apache.log4j.{Level, Logger}
-import parallelai.spyglass.hbase.{HBasePipeConversions, HBaseSource}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-import cascading.tuple.Fields
-import cascading.property.AppProps
-import java.util.Properties
-
-/**
- * Simple example of HBaseSource usage
- */
-class SimpleHBaseSourceExample(args: Args) extends JobBase(args) with HBasePipeConversions {
-
- val isDebug: Boolean = args("debug").toBoolean
-
- if (isDebug) Logger.getRootLogger.setLevel(Level.DEBUG)
-
- val output = args("output")
-
- val hbs = new HBaseSource(
- "table_name",
- //"quorum_name:2181",
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- new Fields("key"),
- List("column_family"),
- List(new Fields("column_name1", "column_name2")),
- sourceMode = SourceMode.GET_LIST, keyList = List("1", "2", "3"))
- .read
- .debug
- .fromBytesWritable(new Fields("key", "column_name1", "column_name2"))
- .write(Tsv(output format "get_list"))
-
- }
diff --git a/scald-mvp/src/main/scala/example/WordCountJob.scala b/scald-mvp/src/main/scala/example/WordCountJob.scala
deleted file mode 100644
index 0e63fed..0000000
--- a/scald-mvp/src/main/scala/example/WordCountJob.scala
+++ /dev/null
@@ -1,12 +0,0 @@
-package example
-
-import com.twitter.scalding._
-
-class WordCountJob(args: Args) extends Job(args) {
- TypedPipe.from(TextLine(args("input")))
- .flatMap { line => line.split("\\s+") }
- .map { word => (word, 1L) }
- .sumByKey
- // The compiler will enforce the type coming out of the sumByKey is the same as the type we have for our sink
- .write(TypedTsv[(String, Long)](args("output")))
-}
diff --git a/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala b/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala
deleted file mode 100644
index 5df6b2e..0000000
--- a/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala
+++ /dev/null
@@ -1,41 +0,0 @@
-package sandcrawler
-
-import com.twitter.scalding._
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions, HBaseConstants}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-import cascading.tuple.Fields
-import cascading.property.AppProps
-import java.util.Properties
-
-
-class HBaseRowCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
-
-
- // For now doesn't actually count, just dumps a "word count"
-
- val output = args("output")
-
- val hbs = new HBaseSource(
- //"table_name",
- //"quorum_name:2181",
- "wbgrp-journal-extract-0-qa", // HBase Table Name
- "mtrcs-zk1.us.archive.org:2181", // HBase Zookeeper server (to get runtime config info; can be array?)
- new Fields("key"),
- List("file"),
- List(new Fields("size", "mimetype")),
- sourceMode = SourceMode.GET_LIST, keyList = List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU"))
- .read
- .debug
- .fromBytesWritable(new Fields("key"))
- .write(Tsv(output format "get_list"))
-
- /*
- List("column_family"),
- sourceMode = SourceMode.SCAN_ALL)
- .read
- .debug
- .fromBytesWritable(new Fields("key"))
- .write(Tsv(output format "get_list"))
- */
-}
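
The comment at the top of HBaseRowCountJob notes that it only dumps rows rather than counting them. A counting variant would be a small change: read the rows, group everything into a single bucket, and emit the bucket size. The sketch below is hypothetical (the class name is made up, and the SCAN_ALL wiring is borrowed from the commented-out block above, not from any committed code):

    package sandcrawler

    import cascading.tuple.Fields
    import com.twitter.scalding._
    import parallelai.spyglass.base.JobBase
    import parallelai.spyglass.hbase.HBaseConstants.SourceMode
    import parallelai.spyglass.hbase.{HBasePipeConversions, HBaseSource}

    // Sketch only: count rows by scanning the table, grouping all tuples
    // into one bucket, and writing the bucket size as a single-line TSV.
    class HBaseCountRowsSketch(args: Args) extends JobBase(args) with HBasePipeConversions {

      val output = args("output")

      new HBaseSource(
          "wbgrp-journal-extract-0-qa",      // HBase table name, as in HBaseRowCountJob
          "mtrcs-zk1.us.archive.org:2181",   // HBase Zookeeper quorum, as in HBaseRowCountJob
          new Fields("key"),
          List("file"),
          List(new Fields("size", "mimetype")),
          sourceMode = SourceMode.SCAN_ALL)
        .read
        .fromBytesWritable(new Fields("key"))
        .groupAll { _.size('count) }         // one group over all rows; 'count holds the total
        .write(Tsv(output format "row_count"))
    }

Invocation would mirror the HBaseRowCountJob command in the README, swapping in the new class name after com.twitter.scalding.Tool.
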
diff --git a/scald-mvp/src/test/scala/example/SimpleHBaseSourceExampleTest.scala b/scald-mvp/src/test/scala/example/SimpleHBaseSourceExampleTest.scala
deleted file mode 100644
index cf068c1..0000000
--- a/scald-mvp/src/test/scala/example/SimpleHBaseSourceExampleTest.scala
+++ /dev/null
@@ -1,58 +0,0 @@
-package example
-
-import org.junit.runner.RunWith
-import com.twitter.scalding.{JobTest, TupleConversions}
-import org.scalatest.FunSpec
-import org.scalatest.junit.JUnitRunner
-import org.slf4j.LoggerFactory
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable
-import cascading.tuple.{Tuple, Fields}
-import org.apache.hadoop.hbase.util.Bytes
-import scala._
-import com.twitter.scalding.Tsv
-import parallelai.spyglass.hbase.HBaseSource
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-
-/**
- * Example of how to define tests for HBaseSource
- */
-@RunWith(classOf[JUnitRunner])
-class SimpleHBaseSourceExampleTest extends FunSpec with TupleConversions {
-
- val output = "/tmp/testOutput"
-
- val log = LoggerFactory.getLogger(this.getClass.getName)
-
- val sampleData = List(
- List("1", "kk1", "pp1"),
- List("2", "kk2", "pp2"),
- List("3", "kk3", "pp3")
- )
-
- JobTest("example.SimpleHBaseSourceExample")
- .arg("test", "")
- .arg("app.conf.path", "app.conf")
- .arg("output", output)
- .arg("debug", "true")
- .source[Tuple](
- new HBaseSource(
- "table_name",
- "mtrcs-zk1.us.archive.org:2181",
- new Fields("key"),
- List("column_family"),
- List(new Fields("column_name1", "column_name2")),
- sourceMode = SourceMode.GET_LIST, keyList = List("1", "2", "3")),
- sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
- .sink[Tuple](Tsv(output format "get_list")) {
- outputBuffer =>
- log.debug("Output => " + outputBuffer)
-
- it("should return the test data provided.") {
- println("outputBuffer.size => " + outputBuffer.size)
- assert(outputBuffer.size === 3)
- }
- }
- .run
- .finish
-
-}
diff --git a/scald-mvp/src/test/scala/example/WordCountTest.scala b/scald-mvp/src/test/scala/example/WordCountTest.scala
deleted file mode 100644
index c42770f..0000000
--- a/scald-mvp/src/test/scala/example/WordCountTest.scala
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2012 Twitter, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package com.twitter.scalding
-
-import org.scalatest.{ Matchers, WordSpec }
-
-class WordCountTest extends WordSpec with Matchers {
- "A WordCount job" should {
- JobTest(new example.WordCountJob(_))
- .arg("input", "inputFile")
- .arg("output", "outputFile")
- .source(TextLine("inputFile"), List((0, "hack hack hack and hack")))
- .sink[(String, Int)](TypedTsv[(String, Long)]("outputFile")){ outputBuffer =>
- val outMap = outputBuffer.toMap
- "count words correctly" in {
- outMap("hack") shouldBe 4
- outMap("and") shouldBe 1
- }
- }
- .run
- .finish()
- }
-}
diff --git a/scald-mvp/src/test/scala/sandcrawler/HBaseRowCountTest.scala b/scald-mvp/src/test/scala/sandcrawler/HBaseRowCountTest.scala
deleted file mode 100644
index 598f45d..0000000
--- a/scald-mvp/src/test/scala/sandcrawler/HBaseRowCountTest.scala
+++ /dev/null
@@ -1,59 +0,0 @@
-package example
-
-import org.junit.runner.RunWith
-import com.twitter.scalding.{JobTest, TupleConversions}
-import org.scalatest.FunSpec
-import org.scalatest.junit.JUnitRunner
-import org.slf4j.LoggerFactory
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable
-import cascading.tuple.{Tuple, Fields}
-import org.apache.hadoop.hbase.util.Bytes
-import scala._
-import com.twitter.scalding.Tsv
-import parallelai.spyglass.hbase.HBaseSource
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-
-/**
- * Example of how to define tests for HBaseSource
- */
-@RunWith(classOf[JUnitRunner])
-class HBaseRowCountTest extends FunSpec with TupleConversions {
-
- val output = "/tmp/testOutput"
-
- val log = LoggerFactory.getLogger(this.getClass.getName)
-
- val sampleData = List(
- List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "a", "b"),
- List("sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU", "a", "b")
- )
-
- JobTest("sandcrawler.HBaseRowCountJob")
- .arg("test", "")
- .arg("app.conf.path", "app.conf")
- .arg("output", output)
- .arg("debug", "true")
- .source[Tuple](
- new HBaseSource(
- //"table_name",
- //"quorum_name:2181",
- "wbgrp-journal-extract-0-qa",
- "mtrcs-zk1.us.archive.org:2181",
- new Fields("key"),
- List("file"),
- List(new Fields("size", "mimetype")),
- sourceMode = SourceMode.GET_LIST, keyList = List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU")),
- sampleData.map(l => new Tuple(l.map(s => {new ImmutableBytesWritable(Bytes.toBytes(s))}):_*)))
- .sink[Tuple](Tsv(output format "get_list")) {
- outputBuffer =>
- log.debug("Output => " + outputBuffer)
-
- it("should return the test data provided.") {
- println("outputBuffer.size => " + outputBuffer.size)
- assert(outputBuffer.size === 2)
- }
- }
- .run
- .finish
-
-}