about summary refs log tree commit diff stats
path: root/backfill
diff options
context:
space:
mode:
author    Bryan Newbold <bnewbold@archive.org>  2018-03-30 19:12:31 -0700
committer Bryan Newbold <bnewbold@archive.org>  2018-03-30 19:12:31 -0700
commit    bb38ea065758a719331803b4adf875f2d75a1c3d (patch)
tree      84fa8cfca80b10c201ce28ec089557a8a154eb53 /backfill
parent    f6915b4b44e312cee7eda9626d0330268ab786e2 (diff)
download  sandcrawler-bb38ea065758a719331803b4adf875f2d75a1c3d.tar.gz
          sandcrawler-bb38ea065758a719331803b4adf875f2d75a1c3d.zip
clean up backfill code/tests
Diffstat (limited to 'backfill')
-rwxr-xr-x  backfill/backfill_hbase_from_cdx.py            | 35
-rw-r--r--  backfill/tests/test_backfill_hbase_from_cdx.py | 31
2 files changed, 42 insertions(+), 24 deletions(-)
diff --git a/backfill/backfill_hbase_from_cdx.py b/backfill/backfill_hbase_from_cdx.py
index 92a6d32..d14dd92 100755
--- a/backfill/backfill_hbase_from_cdx.py
+++ b/backfill/backfill_hbase_from_cdx.py
@@ -18,6 +18,7 @@ TODO:
import sys
import json
+from datetime import datetime
import happybase
import mrjob
from mrjob.job import MRJob
@@ -27,7 +28,6 @@ NORMAL_MIME = (
'application/postscript',
'text/html',
'text/xml',
- #'application/xml',
)
def normalize_mime(raw):
@@ -62,15 +62,31 @@ def transform_line(raw_cdx):
url = cdx[2]
mime = normalize_mime(cdx[3])
http_status = cdx[4]
- if http_status != "200":
- return None
key = cdx[5]
c_size = cdx[8]
offset = cdx[9]
warc = cdx[10]
+
+ if not (key.isalnum() and c_size.isdigit() and offset.isdigit()
+ and http_status == "200" and len(key) == 32 and dt.isdigit()):
+ return None
+
+ if '-' in (surt, dt, url, mime, http_status, key, c_size, offset, warc):
+ return None
+
info = dict(surt=surt, dt=dt, url=url, c_size=c_size, offset=offset,
warc=warc)
- return {'key': key, 'file:mime': mime, 'file:cdx': info}
+
+ warc_file = warc.split('/')[-1]
+ dt_iso = datetime.strptime(dt, "%Y%m%d%H%M%S").isoformat()
+ try:
+ dt_iso = datetime.strptime(dt, "%Y%m%d%H%M%S").isoformat()
+ except:
+ return None
+
+ # 'i' intentionally not set
+ heritrix = dict(u=url, d=dt_iso, f=warc_file, o=offset, c="1")
+ return {'key': key, 'file:mime': mime, 'file:cdx': info, 'f:c': heritrix}
def test_transform_line():
@@ -85,6 +101,13 @@ def test_transform_line():
'warc': "SEMSCHOLAR-PDF-CRAWL-2017-08-04-20170828231135742-00000-00009-wbgrp-svc284/SEMSCHOLAR-PDF-CRAWL-2017-08-04-20170828232253025-00005-3480~wbgrp-svc284.us.archive.org~8443.warc.gz",
'offset': "931661233",
'c_size': "210251",
+ },
+ 'f:c': {
+ 'u': "https://www.ldc.upenn.edu/sites/www.ldc.upenn.edu/files/medar2009-large-arabic-broadcast-collection.pdf",
+ 'd': "2017-08-28T23:31:54",
+ 'f': "SEMSCHOLAR-PDF-CRAWL-2017-08-04-20170828232253025-00005-3480~wbgrp-svc284.us.archive.org~8443.warc.gz",
+ 'o': "931661233",
+ 'c': "1",
}
}
@@ -120,6 +143,7 @@ class MRCDXBackfillHBase(MRJob):
self.hb_table = None
super(MRCDXBackfillHBase, self).__init__(*args, **kwargs)
+ self.mime_filter = ['application/pdf']
def mapper_init(self):
@@ -131,8 +155,6 @@ class MRCDXBackfillHBase(MRJob):
raise Exception("Couldn't connect to HBase using host: {}".format(host))
self.hb_table = hb_conn.table(self.options.hbase_table)
- self.mime_filter = ['application/pdf']
-
def mapper(self, _, raw_cdx):
self.increment_counter('lines', 'total')
@@ -154,6 +176,7 @@ class MRCDXBackfillHBase(MRJob):
return
key = info.pop('key')
+ info['f:c'] = json.dumps(info['f:c'], sort_keys=True, indent=None)
info['file:cdx'] = json.dumps(info['file:cdx'], sort_keys=True,
indent=None)
diff --git a/backfill/tests/test_backfill_hbase_from_cdx.py b/backfill/tests/test_backfill_hbase_from_cdx.py
index dfed0b3..d8277be 100644
--- a/backfill/tests/test_backfill_hbase_from_cdx.py
+++ b/backfill/tests/test_backfill_hbase_from_cdx.py
@@ -8,29 +8,18 @@ from backfill_hbase_from_cdx import MRCDXBackfillHBase
@pytest.fixture
def job():
+ """
+ Note: this mock only seems to work with job.run_mapper(), not job.run();
+ the later results in a separate instantiation without the mock?
+ """
conn = happybase_mock.Connection()
- conn.create_table('wbgrp-journal-extract-test', {'file': {}, 'grobid0': {}})
+ conn.create_table('wbgrp-journal-extract-test',
+ {'file': {}, 'grobid0': {}, 'f': {}})
table = conn.table('wbgrp-journal-extract-test')
job = MRCDXBackfillHBase(['--no-conf', '-'], hb_table=table)
- job.hb_table = table
return job
-#Example to read back rows...
-"""
-def basic_job_run_capturing_output(job):
-
- job.sandbox(stdin=open('tests/files/example.cdx', 'r'))
- results = []
- with job.make_runner() as runner:
- runner.run()
- for key, value in job.parse_output(runner.cat_output()):
- results.append(value)
-
- print(results)
- assert len(list(job.hb_table.scan())) == 5
-"""
-
def test_some_lines(job):
raw = io.BytesIO(b"""
@@ -52,4 +41,10 @@ com,pbworks,educ333b)/robots.txt 20170705063311 http://educ333b.pbworks.com/robo
row = job.hb_table.row(b'MPCXVWMUTRUGFP36SLPHKDLY6NGU4S3J')
assert row[b'file:mime'] == b"application/pdf"
- json.loads(row[b'file:cdx'].decode('utf-8'))
+
+ file_cdx = json.loads(row[b'file:cdx'].decode('utf-8'))
+ assert int(file_cdx['offset']) == 328850624
+
+ f_c = json.loads(row[b'f:c'].decode('utf-8'))
+ assert f_c['u'] == "http://cadmus.eui.eu/bitstream/handle/1814/36635/RSCAS_2015_03.pdf%3Bjsessionid%3D761393014319A39F40D32AE3EB3A853F?sequence%3D1"
+ assert b'i' not in f_c