path: root/scalding/src/main/scala/sandcrawler/DumpGrobidMetaInsertableJob.scala
package sandcrawler

import java.util.Properties

import cascading.property.AppProps
import cascading.tuple.Fields
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import parallelai.spyglass.base.JobBase
import parallelai.spyglass.hbase.HBaseConstants.SourceMode
import parallelai.spyglass.hbase.HBasePipeConversions
import parallelai.spyglass.hbase.HBaseSource

// Dumps the SHA1 key and grobid0:metadata columns, plus the file metadata needed
// to insert into fatcat. Used, e.g., as part of the long-tail Mellon pipeline.
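// Each output row is a TSV tuple of:
//   (SHA1 key, file:cdx, file:mime, file:size, grobid0:metadata)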
class DumpGrobidMetaInsertableJob(args: Args) extends JobBase(args) with HBasePipeConversions {

  val metaPipe : TypedPipe[(String, String, String, Long, String)] = HBaseBuilder.build(args("hbase-table"),
                     args("zookeeper-hosts"),
                     List("file:cdx", "file:mime", "file:size", "grobid0:metadata"),
                     SourceMode.SCAN_ALL)
    .read
    .toTypedPipe[(ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable)](
      new Fields("key", "cdx", "mime", "size", "metadata"))
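    // Keep only rows where every required column is present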
    .filter { case (_, cdx, mime, size, metadata) => cdx != null && mime != null && size != null && metadata != null }
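    // Convert HBase byte values to native types (strings; file size as a Long)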
    .map { case (key, cdx, mime, size, metadata) =>
      (Bytes.toString(key.copyBytes()),
       Bytes.toString(cdx.copyBytes()),
       Bytes.toString(mime.copyBytes()),
       Bytes.toLong(size.copyBytes()),
       Bytes.toString(metadata.copyBytes())
      )
    }

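  // Write the tuples as TSV to the path given by --output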
  metaPipe.write(TypedTsv[(String, String, String, Long, String)](args("output")))

}
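
// A minimal invocation sketch (an assumption, not taken from this repo's docs): like
// other Scalding jobs, this class can be launched through com.twitter.scalding.Tool
// from a built assembly jar. The jar name and argument values below are placeholders;
// the --hbase-table, --zookeeper-hosts, and --output flags mirror the args() lookups
// in the job above.
//
//   hadoop jar sandcrawler-assembly.jar com.twitter.scalding.Tool \
//     sandcrawler.DumpGrobidMetaInsertableJob --hdfs \
//     --hbase-table <table> \
//     --zookeeper-hosts <zk-quorum> \
//     --output <hdfs-output-path>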