author | Bryan Newbold <bnewbold@archive.org> | 2018-08-20 18:43:11 -0700
committer | Bryan Newbold <bnewbold@archive.org> | 2018-08-21 21:25:56 -0700
commit | 29eb22bc60193e6f919d98ec14a39ecd53477331
tree | fd3c99ce853bc5036dc2572eb04c1d3b1f6b8a38
parent | 39bf4b57cd552e8042bfa25565b390cb2a456ab0
use grobid0:metadata, not tei_json
This is for efficiency. I had forgotten that the extract script actually
writes this path!
-rw-r--r-- | scalding/src/main/scala/sandcrawler/GrobidScorable.scala | 10
-rw-r--r-- | scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala | 10
2 files changed, 10 insertions, 10 deletions
```diff
diff --git a/scalding/src/main/scala/sandcrawler/GrobidScorable.scala b/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
index d7a1eea..e510f75 100644
--- a/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
+++ b/scalding/src/main/scala/sandcrawler/GrobidScorable.scala
@@ -24,10 +24,10 @@ class GrobidScorable extends Scorable with HBasePipeConversions {
     getSource(args)
       .read
       // Can't just "fromBytesWritable" because we have multiple types?
-      .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "tei_json", "status_code"))
-      .filter { case (_, tei_json, status_code) => tei_json != null && status_code != null }
-      .map { case (key, tei_json, status_code) =>
-        (Bytes.toString(key.copyBytes()), Bytes.toString(tei_json.copyBytes()), Bytes.toLong(status_code.copyBytes()))
+      .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "metadata", "status_code"))
+      .filter { case (_, metadata, status_code) => metadata != null && status_code != null }
+      .map { case (key, metadata, status_code) =>
+        (Bytes.toString(key.copyBytes()), Bytes.toString(metadata.copyBytes()), Bytes.toLong(status_code.copyBytes()))
       }
       // TODO: Should I combine next two stages for efficiency?
       .collect { case (key, json, StatusOK) => (key, json) }
@@ -37,7 +37,7 @@ class GrobidScorable extends Scorable with HBasePipeConversions {
 
 object GrobidScorable {
   def getHBaseSource(table : String, host : String) : HBaseSource = {
-    HBaseBuilder.build(table, host, List("grobid0:tei_json", "grobid0:status_code"), SourceMode.SCAN_ALL)
+    HBaseBuilder.build(table, host, List("grobid0:metadata", "grobid0:status_code"), SourceMode.SCAN_ALL)
   }
 
   def jsonToMapFeatures(key : String, json : String) : MapFeatures = {
diff --git a/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala b/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
index 5e06f9b..05e7074 100644
--- a/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
+++ b/scalding/src/main/scala/sandcrawler/GrobidScorableDumpJob.scala
@@ -26,13 +26,13 @@ class GrobidScorableDumpJob(args: Args) extends JobBase(args) {
   val pipe = GrobidScorable.getHBaseSource(args("hbase-table"), args("zookeeper-hosts"))
     .read
     // Can't just "fromBytesWritable" because we have multiple types?
-    .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "tei_json", "status_code"))
-    .filter { case (_, tei_json, status_code) =>
+    .toTypedPipe[(ImmutableBytesWritable,ImmutableBytesWritable,ImmutableBytesWritable)](new Fields("key", "metadata", "status_code"))
+    .filter { case (_, metadata, status_code) =>
       grobidHbaseRows.inc
-      tei_json != null && status_code != null
+      metadata != null && status_code != null
     }
-    .map { case (key, tei_json, status_code) =>
-      (Bytes.toString(key.copyBytes()), Bytes.toString(tei_json.copyBytes()), Bytes.toLong(status_code.copyBytes()))
+    .map { case (key, metadata, status_code) =>
+      (Bytes.toString(key.copyBytes()), Bytes.toString(metadata.copyBytes()), Bytes.toLong(status_code.copyBytes()))
     }
     // TODO: Should I combine next two stages for efficiency?
     .collect { case (key, json, 200) =>
```
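For reference, the read pattern after this change looks roughly like the sketch below. It reuses `GrobidScorable.getHBaseSource` and the column names from this commit; the job class name, the `output` argument, the `TypedTsv` sink, and the extra `TDsl` import are illustrative assumptions, not code from the repo.

```scala
package sandcrawler

import cascading.tuple.Fields
import com.twitter.scalding._
import com.twitter.scalding.typed.TDsl._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes

// Hypothetical one-off dump job following the same pattern as GrobidScorableDumpJob
// above: pull the condensed GROBID JSON from grobid0:metadata (not grobid0:tei_json)
// and keep only rows that were processed successfully.
class GrobidMetadataDumpSketch(args: Args) extends Job(args) {

  GrobidScorable.getHBaseSource(args("hbase-table"), args("zookeeper-hosts"))
    .read
    // Can't just "fromBytesWritable" because the tuple mixes types
    .toTypedPipe[(ImmutableBytesWritable, ImmutableBytesWritable, ImmutableBytesWritable)](
      new Fields("key", "metadata", "status_code"))
    // Skip rows where GROBID never wrote metadata or a status code
    .filter { case (_, metadata, status_code) => metadata != null && status_code != null }
    .map { case (key, metadata, status_code) =>
      (Bytes.toString(key.copyBytes()), Bytes.toString(metadata.copyBytes()), Bytes.toLong(status_code.copyBytes()))
    }
    // Keep only successful extractions; emit (key, metadata JSON) pairs
    .collect { case (key, json, 200) => (key, json) }
    .write(TypedTsv[(String, String)](args("output")))
}
```

The "efficiency" in the commit message presumably comes from scanning the condensed `grobid0:metadata` cells rather than the much larger full TEI-derived JSON stored in `grobid0:tei_json`.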