#!/usr/bin/env python3
"""
Tool for downloading fatcat release PDFs to disk (assuming there is at least
one accessible PDF file entity for each release).
Behavior:
- if no file, or not accessible, skip release
- filter files, then iterate through:
- if already exists locally on disk, skip
- try downloading from any archive.org or web.archive.org URLs
- verify SHA-1
- write out to disk
TODO:
x blob_path(sha1hex) -> returns relative/local path file would be saved to
x filter_files(files) -> list of files to try
x fetch_release(release) -> filters a release's files and tries each until one succeeds
x fetch_file(file) -> downloads, verifies, and writes a single file to disk
x fetch_content(url) -> fetches raw bytes from a single URL
LATER:
- GROBID XML as well, from minio?
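
Example invocation (script and file names here are illustrative):

    ./deliver_fatcat_to_disk.py --disk-dir /srv/sandcrawler releases.json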
"""
# XXX: some broken MRO thing going on in here due to python3 object wrangling
# in `wayback` library. Means we can't run pylint.
# pylint: skip-file
import argparse
import hashlib
import json
import os
import sys
from collections import Counter

import magic
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry  # pylint: disable=import-error


def gen_file_metadata(blob):
"""
Takes a file blob (bytestream) and returns hashes and other metadata.
Returns a dict: size_bytes, md5hex, sha1hex, sha256hex, mimetype
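
    Illustrative example (digest values elided, not real hashes; mimetype is
    whatever libmagic reports for the buffer, e.g. 'application/pdf'):

        gen_file_metadata(b"%PDF-1.4 ...") -> {
            'size_bytes': ...,
            'sha1hex': '...',
            'sha256hex': '...',
            'md5hex': '...',
            'mimetype': 'application/pdf',
        }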
"""
assert blob
mimetype = magic.Magic(mime=True).from_buffer(blob)
hashes = [
hashlib.sha1(),
hashlib.sha256(),
hashlib.md5(),
]
for h in hashes:
h.update(blob)
return dict(
size_bytes=len(blob),
sha1hex=hashes[0].hexdigest(),
sha256hex=hashes[1].hexdigest(),
md5hex=hashes[2].hexdigest(),
mimetype=mimetype,
)
def requests_retry_session(retries=10, backoff_factor=3,
status_forcelist=(500, 502, 504), session=None):
"""
From: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
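
    Example (illustrative): a session from requests_retry_session(retries=3)
    will retry connection errors, read timeouts, and 500/502/504 responses
    with exponential backoff before raising.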
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class DeliverFatcatDisk:
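    """
    Downloads PDF file entities attached to fatcat releases from
    (web.)archive.org and writes them to a local directory tree sharded by
    SHA-1 prefix.
    """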
def __init__(self, disk_dir, **kwargs):
self.count = Counter()
self.disk_dir = disk_dir
self.disk_prefix = kwargs.get('disk_prefix', 'pdf/')
self.disk_suffix = kwargs.get('disk_suffix', '.pdf')
self.session = requests_retry_session()
def run(self, release_json_file):
sys.stderr.write("Ensuring all 256 base directories exist...\n")
for i in range(256):
fpath = "{}/{}{:02x}".format(
self.disk_dir,
self.disk_prefix,
i)
os.makedirs(fpath, exist_ok=True)
sys.stderr.write("Starting...\n")
for line in release_json_file:
self.count['total'] += 1
if not line.startswith('{'):
self.count['skip-no-release'] += 1
continue
release = json.loads(line)
assert 'ident' in release
self.fetch_release(release)
sys.stderr.write("{}\n".format(self.count))
def blob_path(self, sha1hex):
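        """
        Maps a SHA-1 hex digest to its local file path, sharded by the first
        two hex characters. Illustrative (truncated) example:

            blob_path("0fe6...") -> "<disk_dir>/pdf/0f/0fe6....pdf"
        """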
fpath = "{}/{}{}/{}{}".format(
self.disk_dir,
self.disk_prefix,
sha1hex[0:2],
sha1hex,
self.disk_suffix)
return fpath
def does_file_already_exist(self, sha1hex):
return os.path.isfile(self.blob_path(sha1hex))
def filter_files(self, files):
"""
Takes a list of file entities and only returns the ones which are PDFs
we can download.
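
        A file is kept if its mimetype is unset or contains "pdf", and at
        least one of its URLs is on archive.org (including web.archive.org
        wayback URLs).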
"""
good = []
for f in files:
            if f['mimetype'] and 'pdf' not in f['mimetype'].lower():
continue
for url in f['urls']:
if 'archive.org/' in url['url']:
good.append(f)
break
return good
def fetch_content(self, url):
"""
Returns tuple: (str:status, content)
Content contains bytes only if status is "success", otherwise None
"""
if '://web.archive.org/' in url:
# add id_ to URL to avoid wayback re-writing
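            # e.g. (illustrative URL):
            #   http://web.archive.org/web/20180101000000/http://example.com/a.pdf
            #   -> http://web.archive.org/web/20180101000000id_/http://example.com/a.pdf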
            parts = url.split('/')
            if parts[2] == 'web.archive.org' and parts[3] == 'web' and '_' not in parts[4]:
                parts[4] = parts[4] + 'id_'
                url = '/'.join(parts)
try:
resp = self.session.get(url)
except requests.exceptions.RetryError:
return ('wayback-error', None)
if resp.status_code != 200:
return ('fetch:{}'.format(resp.status_code), None)
else:
return ('success', resp.content)
def fetch_file(self, f):
"""
Returns tuple: (status, sha1hex, file_meta)
file_meta is a dict on success, or None otherwise
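
        Possible status values: 'exists', 'success', 'no-urls',
        'wayback-error', 'sha1-mismatch', or 'fetch:<http-status-code>'.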
"""
sha1hex = f['sha1']
if self.does_file_already_exist(sha1hex):
return ('exists', sha1hex, None)
status = None
for url in f['urls']:
url = url['url']
            if 'archive.org' not in url:
continue
status, content = self.fetch_content(url)
if status == 'success':
                # verify fetched bytes against the expected SHA-1 before writing
file_meta = gen_file_metadata(content)
if file_meta['sha1hex'] != sha1hex:
status = 'sha1-mismatch'
continue
with open(self.blob_path(sha1hex), 'wb') as outf:
outf.write(content)
return ('success', sha1hex, file_meta)
if status:
return (status, sha1hex, None)
else:
return ('no-urls', sha1hex, None)
def fetch_release(self, release):
good_files = self.filter_files(release['files'])
status = 'no-file'
sha1hex = None
for f in good_files:
status, sha1hex, file_meta = self.fetch_file(f)
            if status in ('success', 'exists'):
                break
if sha1hex:
print("{}\t{}".format(status, sha1hex))
else:
print(status)
self.count[status] += 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--disk-dir',
required=True,
type=str,
help='local base directory to save into')
parser.add_argument('--disk-prefix',
type=str,
default="pdf/",
                        help='directory prefix for saved files (under --disk-dir)')
parser.add_argument('--disk-suffix',
type=str,
default=".pdf",
help='file suffix for created files')
    parser.add_argument('release_json_file',
                        nargs='?',
                        default=sys.stdin,
                        type=argparse.FileType('r'),
                        help="JSON manifest of fatcat release entities (one per line; defaults to stdin)")
args = parser.parse_args()
worker = DeliverFatcatDisk(**args.__dict__)
worker.run(args.release_json_file)
if __name__ == '__main__': # pragma: no cover
main()