path: root/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
package sandcrawler

import cascading.flow.FlowDef
import cascading.pipe.Pipe
import cascading.tuple.Fields
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
import parallelai.spyglass.hbase.HBaseConstants.SourceMode
import parallelai.spyglass.hbase.HBasePipeConversions
import parallelai.spyglass.hbase.HBaseSource

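/**
 * Scorable implementation backed by GROBID-extracted TEI JSON stored in
 * HBase: reads (key, tei_json) rows, derives a slug from each row's JSON,
 * and emits MapFeatures only for rows that yield a usable slug.
 */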
class GrobidScorable extends Scorable with HBasePipeConversions {
  def getFeaturesPipe(args: Args)(implicit flowDef: FlowDef, mode: Mode): TypedPipe[MapFeatures] = {
    // TODO: Clean up code after debugging.
    val grobidSource = HBaseCrossrefScore.getHBaseSource(
      args("hbase-table"),
      args("zookeeper-hosts"))

    val pipe0: Pipe = grobidSource.read
    val grobidPipe: TypedPipe[MapFeatures] = pipe0
      // Convert the raw ImmutableBytesWritable cells in both columns to Strings.
      .fromBytesWritable(new Fields("key", "tei_json"))
      //  .debug  // Should be 4 tuples for mocked data
      // TODO: Figure out why this line (used in HBaseCrossrefScoreJob.scala)
      // didn't work here: .toTypedPipe[(String, String)]('key, 'tei_json)
      .toTypedPipe[(String, String)](new Fields("key", "tei_json"))
      .map { case (key, json) =>
        // Rows whose TEI JSON doesn't yield a slug are tagged with NoSlug
        // and dropped by the filter below.
        HBaseCrossrefScore.grobidToSlug(json) match {
          case Some(slug) => new MapFeatures(slug, key, json)
          case None => new MapFeatures(Scorable.NoSlug, key, json)
        }
      }
      .filter { _.slug != Scorable.NoSlug }
    grobidPipe
  }
  }
  /*
  // Local variant of HBasePipeConversions.fromBytesWritable, kept around for
  // debugging. As originally written it referenced an undefined `pipe` and
  // used Bytes without importing org.apache.hadoop.hbase.util.Bytes; a
  // compiling version would take the pipe as a parameter, e.g.:
  def fromBytesWritableLocal(pipe: Pipe, f: Fields): Pipe =
    asList(f)
      .foldLeft(pipe) { (p, fld) =>
        p.map(fld.toString -> fld.toString) { from: org.apache.hadoop.hbase.io.ImmutableBytesWritable =>
          Option(from).map(x => Bytes.toString(x.get)).getOrElse(null)
        }
      }
  */
}
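
// Minimal usage sketch (not part of the original file): wires getFeaturesPipe
// into a Scalding job and dumps (slug, key) pairs to TSV. The job class and
// the "output" argument are hypothetical; the `slug` and `key` field names
// follow the MapFeatures constructor usage above.
class GrobidScorableDumpJob(args: Args) extends Job(args) {
  val features: TypedPipe[MapFeatures] = new GrobidScorable().getFeaturesPipe(args)
  features
    .map { f => (f.slug, f.key) } // keep just the join slug and HBase row key
    .write(TypedTsv[(String, String)](args("output")))
}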