path: root/scalding/src/main/scala/sandcrawler/ScoreInsertable.scala
package sandcrawler

import cascading.tuple.Fields
import cascading.pipe.Pipe
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import parallelai.spyglass.base.JobBase
import parallelai.spyglass.hbase.HBaseConstants.SourceMode
import parallelai.spyglass.hbase.HBasePipeConversions
import parallelai.spyglass.hbase.HBaseSource

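/**
 * Scalding job that:
 *  1. joins GROBID-extracted metadata and Crossref records by title slug,
 *  2. scores each candidate pair with Scorable.computeSimilarity,
 *  3. re-keys the scored rows by file sha1 and joins them against the CDX,
 *     mime, and size columns scanned from HBase,
 *  4. writes the combined rows out as TSV.
 */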
class ScoreInsertableJob(args: Args) extends JobBase(args) with HBasePipeConversions {

  val grobidRowCount = Stat("grobid-rows-filtered", "sandcrawler")
  val crossrefRowCount = Stat("crossref-rows-filtered", "sandcrawler")
  val cdxRowCount = Stat("cdx-rows", "sandcrawler")
  val scoredRowCount = Stat("scored-rows", "sandcrawler")
  val joinedRowCount = Stat("joined-rows", "sandcrawler")

  val grobidScorable : Scorable = new GrobidScorable()
  val crossrefScorable : Scorable = new CrossrefScorable()

  val grobidPipe : TypedPipe[(String, ReduceFeatures)] = grobidScorable
    .getInputPipe(args)
    .map { r =>
      grobidRowCount.inc
      r
    }
  val crossrefPipe : TypedPipe[(String, ReduceFeatures)] = crossrefScorable
    .getInputPipe(args)
    .map { r =>
      crossrefRowCount.inc
      r
    }
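
  // Scan the HBase table for (row key, file:cdx, file:mime, file:size),
  // drop rows missing any of the three columns, and decode the raw bytes
  // into (String, String, String, Long).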
  val cdxPipe : TypedPipe[(String, String, String, Long)] = ScoreInsertableJob.getHBaseCdxSource(args("hbase-table"), args("zookeeper-hosts"))
    .read
    .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "cdx", "mime", "size"))
    .filter { case (_, cdx, mime, size) => cdx != null && mime != null && size != null }
    .map { case (key, cdx, mime, size) =>
      (Bytes.toString(key.copyBytes()),
       Bytes.toString(cdx.copyBytes()),
       Bytes.toString(mime.copyBytes()),
       Bytes.toLong(size.copyBytes()))
    }
    .map { r =>
      cdxRowCount.inc
      r
    }

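  // Inner-join GROBID and Crossref features on slug, score each pair, then
  // re-key the results by the GROBID document's sha1 so they can be joined
  // with cdxPipe, which is keyed by the same HBase row key.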
  val scoredPipe = grobidPipe
    .addTrap(TypedTsv(args("output") + ".trapped"))
    .join(crossrefPipe)
    .map { case (slug, (grobidFeatures, crossrefFeatures)) =>
      scoredRowCount.inc
      // GROBID feature JSON always carries a "sha1" key, so this Option is
      // never None and orNull is safe.
      val key = Scorable.getStringOption(Scorable.jsonToMap(grobidFeatures.json), "sha1").orNull
      (key, new ReduceOutput(
        slug,
        Scorable.computeSimilarity(grobidFeatures, crossrefFeatures),
        grobidFeatures.json,
        crossrefFeatures.json))
    }
    .map { case (key, entry) => (key, entry.slug, entry.score, entry.json1, entry.json2) }
    .groupBy { case (key, _, _, _, _) => key }

  // TypedTsv can't serialize case classes, so flatten rows to plain tuples before writing.
  val joinedPipe = scoredPipe
    .join(cdxPipe.groupBy { case (key, _, _, _) => key })
    .map { case (key, ((_, slug, score, left, right), (_, cdx, mime, size))) =>
      joinedRowCount.inc
      (key, slug, score, left, right, cdx, mime, size)
    }
    .write(TypedTsv[(String, String, Int, String, String, String, String, Long)](args("output")))
}
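
// Example invocation (a sketch: the assembly jar name and output path are
// hypothetical, and GrobidScorable/CrossrefScorable read additional args of
// their own -- check those classes for the full set of required flags):
//
//   hadoop jar sandcrawler-assembly.jar com.twitter.scalding.Tool \
//     sandcrawler.ScoreInsertableJob --hdfs \
//     --hbase-table wbgrp-journal-extract-0-qa \
//     --zookeeper-hosts mtrcs-zk1.us.archive.org:2181 \
//     --output hdfs:///user/sandcrawler/score-insertable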

object ScoreInsertableJob {

  // eg, "wbgrp-journal-extract-0-qa", "mtrcs-zk1.us.archive.org:2181"
  def getHBaseCdxSource(hbaseTable: String, zookeeperHosts: String) : HBaseSource = {
    HBaseBuilder.build(
      hbaseTable,
      zookeeperHosts,
      List("file:cdx", "file:mime", "file:size"),
      SourceMode.SCAN_ALL)
  }
}