Diffstat (limited to 'scald-mvp/src/main/scala')
 scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala | 36 ----
 scald-mvp/src/main/scala/example/WordCountJob.scala             | 12 --
 scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala     | 41 ----
3 files changed, 0 insertions, 89 deletions
diff --git a/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala b/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala
deleted file mode 100644
index fe2a120..0000000
--- a/scald-mvp/src/main/scala/example/SimpleHBaseSourceExample.scala
+++ /dev/null
@@ -1,36 +0,0 @@
-package example
-
-import com.twitter.scalding.{Tsv, Args}
-import parallelai.spyglass.base.JobBase
-import org.apache.log4j.{Level, Logger}
-import parallelai.spyglass.hbase.{HBasePipeConversions, HBaseSource}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-import cascading.tuple.Fields
-import cascading.property.AppProps
-import java.util.Properties
-
-/**
- * Simple example of HBaseSource usage
- */
-class SimpleHBaseSourceExample(args: Args) extends JobBase(args) with HBasePipeConversions {
-
-  val isDebug: Boolean = args("debug").toBoolean
-
-  if (isDebug) Logger.getRootLogger.setLevel(Level.DEBUG)
-
-  val output = args("output")
-
-  val hbs = new HBaseSource(
-    "table_name",
-    //"quorum_name:2181",
-    "mtrcs-zk1.us.archive.org:2181",  // HBase Zookeeper server (to get runtime config info; can be array?)
-    new Fields("key"),
-    List("column_family"),
-    List(new Fields("column_name1", "column_name2")),
-    sourceMode = SourceMode.GET_LIST, keyList = List("1", "2", "3"))
-    .read
-    .debug
-    .fromBytesWritable(new Fields("key", "column_name1", "column_name2"))
-    .write(Tsv(output format "get_list"))
-
-  }
diff --git a/scald-mvp/src/main/scala/example/WordCountJob.scala b/scald-mvp/src/main/scala/example/WordCountJob.scala
deleted file mode 100644
index 0e63fed..0000000
--- a/scald-mvp/src/main/scala/example/WordCountJob.scala
+++ /dev/null
@@ -1,12 +0,0 @@
-package example
-
-import com.twitter.scalding._
-
-class WordCountJob(args: Args) extends Job(args) {
-  TypedPipe.from(TextLine(args("input")))
-    .flatMap { line => line.split("\\s+") }
-    .map { word => (word, 1L) }
-    .sumByKey
-    // The compiler will enforce the type coming out of the sumByKey is the same as the type we have for our sink
-    .write(TypedTsv[(String, Long)](args("output")))
-}
diff --git a/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala b/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala
deleted file mode 100644
index 5df6b2e..0000000
--- a/scald-mvp/src/main/scala/sandcrawler/HBaseRowCountJob.scala
+++ /dev/null
@@ -1,41 +0,0 @@
-package sandcrawler
-
-import com.twitter.scalding._
-import parallelai.spyglass.base.JobBase
-import parallelai.spyglass.hbase.{HBaseSource, HBasePipeConversions, HBaseConstants}
-import parallelai.spyglass.hbase.HBaseConstants.SourceMode
-import cascading.tuple.Fields
-import cascading.property.AppProps
-import java.util.Properties
-
-
-class HBaseRowCountJob(args: Args) extends JobBase(args) with HBasePipeConversions {
-
-
-  // For now doesn't actually count, just dumps a "word count"
-
-  val output = args("output")
-
-  val hbs = new HBaseSource(
-    //"table_name",
-    //"quorum_name:2181",
-    "wbgrp-journal-extract-0-qa",     // HBase Table Name
-    "mtrcs-zk1.us.archive.org:2181",  // HBase Zookeeper server (to get runtime config info; can be array?)
-    new Fields("key"),
-    List("file"),
-    List(new Fields("size", "mimetype")),
-    sourceMode = SourceMode.GET_LIST, keyList = List("sha1:K2DKSSVTXWPRMFDTWSTCQW3RVWRIOV3Q", "sha1:C3YNNEGH5WAG5ZAAXWAEBNXJWT6CZ3WU"))
-    .read
-    .debug
-    .fromBytesWritable(new Fields("key"))
-    .write(Tsv(output format "get_list"))
-
-  /*
-  List("column_family"),
-    sourceMode = SourceMode.SCAN_ALL)
-    .read
-    .debug
-    .fromBytesWritable(new Fields("key"))
-    .write(Tsv(output format "get_list"))
-  */
-}