author     Bryan Newbold <bnewbold@archive.org>    2018-08-20 17:52:48 -0700
committer  Bryan Newbold <bnewbold@archive.org>    2018-08-21 21:25:55 -0700
commit     6aeafb083d73be8cf3296707c3e558d825202bce (patch)
tree       4bb9f0cbf3568313901f31cfcd741b1624e95669
parent     9d5df7b168ca7d6e5b95542de273dde1c4c6e5a9 (diff)
fold all scorable code into sanity check; counters
-rw-r--r--  scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala  59
1 file changed, 52 insertions, 7 deletions
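
The "counters" in the commit message are Scalding Stats: Stat(name, group) registers a Hadoop counter under a counter group, and .inc bumps it from inside a map/filter, which is how the job below sanity-checks how many rows survive each stage. A minimal sketch of the pattern — the job, input, and counter names here are illustrative, not from this commit:

import com.twitter.scalding._

class CounterSketchJob(args: Args) extends Job(args) {
  // Registers a Hadoop counter "lines-seen" under group "counter-sketch".
  val linesSeen = Stat("lines-seen", "counter-sketch")

  TypedPipe.from(TextLine(args("input")))
    .filter { line =>
      linesSeen.inc  // bumps once per record entering the filter, kept or not
      line.nonEmpty
    }
    .write(TypedTsv[String](args("output")))
}
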
diff --git a/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala b/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
index 9a8d701..5e06f9b 100644
--- a/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
+++ b/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
@@ -2,17 +2,62 @@
package sandcrawler
import cascading.pipe.Pipe
-import com.twitter.scalding.Args
-import com.twitter.scalding.TypedPipe
-import com.twitter.scalding.TypedTsv
+import com.twitter.scalding._
+import com.twitter.scalding.typed.TDsl._
import parallelai.spyglass.base.JobBase
+import cascading.flow.FlowDef
+import cascading.tuple.Fields
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.hbase.util.Bytes
+import parallelai.spyglass.hbase.HBaseConstants.SourceMode
+import parallelai.spyglass.hbase.HBasePipeConversions
+import parallelai.spyglass.hbase.HBaseSource
+
class GrobidScorableDumpJob(args: Args) extends JobBase(args) {
- val sc1 : Scorable = new GrobidScorable()
- val pipe1 : TypedPipe[(String, ReduceFeatures)] = sc1.getInputPipe(args)
+ val grobidHbaseRows = Stat("hbase-rows-scanned", "hbase-grobid-dump")
+ val filteredGrobidRows = Stat("grobid-rows-filtered", "hbase-grobid-dump")
+ val parsedGrobidRows = Stat("grobid-rows-parsed", "hbase-grobid-dump")
+ val validGrobidRows = Stat("grobid-rows-valid-slug", "hbase-grobid-dump")
+
+ val pipe = GrobidScorable.getHBaseSource(args("hbase-table"), args("zookeeper-hosts"))
+ .read
+    // Can't just .fromBytesWritable the row here: the columns mix types (status_code is a Long, not a String).
+    .toTypedPipe[(ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable)](new Fields("key", "tei_json", "status_code"))
+ .filter { case (_, tei_json, status_code) =>
+ grobidHbaseRows.inc
+ tei_json != null && status_code != null
+ }
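+    // Decode the raw HBase cells: key and tei_json as strings, status_code as a long.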
+ .map { case (key, tei_json, status_code) =>
+ (Bytes.toString(key.copyBytes()), Bytes.toString(tei_json.copyBytes()), Bytes.toLong(status_code.copyBytes()))
+ }
+    // TODO: should the next two stages be combined for efficiency?
+ .collect { case (key, json, 200) =>
+ filteredGrobidRows.inc
+ (key, json)
+ }
+    .map { case (key, json) =>
+      parsedGrobidRows.inc
+      GrobidScorable.jsonToMapFeatures(key, json)
+    }
+ .filter { entry => Scorable.isValidSlug(entry.slug) }
+ .map { entry =>
+ validGrobidRows.inc
+ entry
+ }
+    // XXX: should this groupBy come after the map below?
+ .groupBy { case MapFeatures(slug, json) => slug }
+ .map { tuple =>
+ val (slug : String, features : MapFeatures) = tuple
+ (slug, ReduceFeatures(features.json))
+ }
- pipe1
- .map { case (slug, features) => (slug, features.json) }
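+  // Dump the final (slug, features.json) pairs produced by the Scorable-style pipeline above.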
+ pipe
+ .map { case (slug, features) =>
+ (slug, features.json)
+ }
.write(TypedTsv[(String, String)](args("output")))
}
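
On the in-diff TODO about combining stages: collect is already a fused filter-plus-map, so the HTTP-200 filter and the reshape could in principle be merged with the parse step the same way. A hedged sketch of the idea on simplified stand-in data — the job name, sample rows, and counter here are assumptions, not this repo's code:

import com.twitter.scalding._

class CollectFusionSketch(args: Args) extends Job(args) {
  val kept = Stat("rows-kept", "collect-fusion-sketch")

  // Stand-in for the decoded HBase rows: (key, tei_json, status_code).
  val rows: TypedPipe[(String, String, Long)] = TypedPipe.from(List(
    ("sha1:AAAA", """{"title": "good row"}""", 200L),
    ("sha1:BBBB", "", 500L)))

  rows
    // collect fuses filter and map into one stage: rows without a 200
    // status are dropped and the survivors are reshaped in the same pass.
    .collect { case (key, json, 200L) =>
      kept.inc
      (key, json)
    }
    .write(TypedTsv[(String, String)](args("output")))
}

The trade-off, and a plausible reason the diff keeps the stages separate, is counter granularity: distinct stages give distinct filtered vs. parsed counts.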