author     Bryan Newbold <bnewbold@archive.org>    2018-08-15 20:20:43 -0700
committer  Bryan Newbold <bnewbold@archive.org>    2018-08-15 20:20:43 -0700
commit     df341a68459829380f1f01015768acee5642f15b (patch)
tree       ac707bad5e11bc15b6823ceefb22300ec1cb0266 /scalding
parent     fafe5b1b2d8f34c6f336b7ae1a48cc78deb90c11 (diff)
download   sandcrawler-df341a68459829380f1f01015768acee5642f15b.tar.gz
           sandcrawler-df341a68459829380f1f01015768acee5642f15b.zip
grobid scoring: status_code as signed int, not string
Diffstat (limited to 'scalding')
-rw-r--r--  scalding/src/main/scala/sandcrawler/GrobidScorable.scala | 9
-rw-r--r--  scalding/src/test/scala/sandcrawler/ScoreJobTest.scala   | 5
2 files changed, 10 insertions(+), 4 deletions(-)
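A minimal sketch of the motivation for the decode change below (not part of the commit; names are illustrative), assuming the status_code cell was written with HBase's Bytes.toBytes(Long): the cell holds 8 raw bytes, which decode correctly with Bytes.toLong but not when interpreted as a string.

    import org.apache.hadoop.hbase.util.Bytes

    object StatusCodeBytesDemo {
      def main(args: Array[String]): Unit = {
        // 8-byte big-endian encoding of the Long value 200
        val cell: Array[Byte] = Bytes.toBytes(200L)
        println(Bytes.toLong(cell))    // prints 200
        println(Bytes.toString(cell))  // mostly NUL bytes, not the string "200"
      }
    }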
diff --git a/scalding/src/main/scala/sandcrawler/GrobidScorable.scala b/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
index c319fe6..f484fad 100644
--- a/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
+++ b/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
@@ -6,6 +6,8 @@ import cascading.flow.FlowDef
import cascading.tuple.Fields
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable
+import org.apache.hadoop.hbase.util.Bytes
import parallelai.spyglass.hbase.HBaseConstants.SourceMode
import parallelai.spyglass.hbase.HBasePipeConversions
import parallelai.spyglass.hbase.HBaseSource
@@ -21,8 +23,11 @@ class GrobidScorable extends Scorable with HBasePipeConversions {
def getFeaturesPipe(args : Args)(implicit mode : Mode, flowDef : FlowDef) : TypedPipe[MapFeatures] = {
getSource(args)
.read
- .fromBytesWritable(new Fields("key", "tei_json", "status_code"))
- .toTypedPipe[(String, String, Int)](new Fields("key", "tei_json", "status_code"))
+ // Can't just "fromBytesWritable" because we have multiple types?
+ .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "tei_json", "status_code"))
+ .map { case (key, tei_json, status_code) =>
+ (Bytes.toString(key.copyBytes()), Bytes.toString(tei_json.copyBytes()), Bytes.toLong(status_code.copyBytes()))
+ }
// TODO: Should I combine next two stages for efficiency?
.collect { case (key, json, StatusOK) => (key, json) }
.map { entry : (String, String) => GrobidScorable.jsonToMapFeatures(entry._1, entry._2) }
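For illustration, the new map step pulled out as a standalone helper (a sketch, not part of the commit; GrobidRowDecode is a hypothetical name), assuming the SpyGlass HBaseSource yields one ImmutableBytesWritable per requested column:

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable
    import org.apache.hadoop.hbase.util.Bytes

    object GrobidRowDecode {
      // Decode a (key, tei_json, status_code) row into native Scala types.
      def decode(row: (ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable))
          : (String, String, Long) = {
        val (key, teiJson, statusCode) = row
        (Bytes.toString(key.copyBytes()),
         Bytes.toString(teiJson.copyBytes()),
         Bytes.toLong(statusCode.copyBytes()))
      }
    }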
diff --git a/scalding/src/test/scala/sandcrawler/ScoreJobTest.scala b/scalding/src/test/scala/sandcrawler/ScoreJobTest.scala
index 34081a5..f68ee1d 100644
--- a/scalding/src/test/scala/sandcrawler/ScoreJobTest.scala
+++ b/scalding/src/test/scala/sandcrawler/ScoreJobTest.scala
@@ -150,8 +150,9 @@ class ScoreJobTest extends FlatSpec with Matchers {
JsonString.replace("<<TITLE>>", "Title 2")
)
- val Ok = "200"
- val Bad = "400"
+ // bnewbold: status codes aren't strings, they are uint64
+ val Ok : Long = 200
+ val Bad : Long = 400
val StatusCodes = List(Ok, Ok, Ok, Bad, Ok, Bad)
val SampleData : List[List[Array[Byte]]] = (Sha1Strings, JsonStrings, StatusCodes)
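A round-trip sketch of the new Long status codes (a sketch, not from the commit; it assumes the test packs each value into an Array[Byte] with Bytes.toBytes, matching the SampleData element type):

    import org.apache.hadoop.hbase.util.Bytes

    object StatusCodeRoundTrip {
      def main(args: Array[String]): Unit = {
        val ok: Long = 200
        val asBytes: Array[Byte] = Bytes.toBytes(ok)   // 8 bytes for a Long
        assert(asBytes.length == 8)
        assert(Bytes.toLong(asBytes) == ok)
        assert(Bytes.toBytes("200").length == 3)       // the old string form encoded differently
      }
    }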