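"""
Importers for file entities crawled by the sandcrawler "ingest" tool: general
ingest-file results (IngestFileResultImporter) and results from public 'Save
Paper Now' requests (SavePaperNowFileImporter).
"""
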
import sys
import json
import base64
import itertools

import fatcat_openapi_client

from .common import EntityImporter, clean, make_rel_url, SANE_MAX_RELEASES, SANE_MAX_URLS, b32_hex


class IngestFileResultImporter(EntityImporter):
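    """
    Importer for sandcrawler ingest-file results. Creates file entities for
    successful crawl hits, each linked to a release identified by fatcat
    ident, by external identifier (DOI/PMID/PMCID/arxiv) lookup, or by a
    biblio-glutton match extracted during GROBID processing.

    Illustrative sketch of the expected input row shape (field names taken
    from the parsing code below; the values here are made up):

        {
            "hit": true,
            "request": {
                "ingest_request_source": "fatcat-changelog",
                "fatcat": {"release_ident": "..."},
                "ext_ids": {"doi": "10.123/abc"}
            },
            "file_meta": {
                "md5hex": "...", "sha1hex": "...", "sha256hex": "...",
                "size_bytes": 12345, "mimetype": "application/pdf"
            },
            "grobid": {"status_code": 200},
            "cdx": {"url": "https://example.com/paper.pdf", "datetime": "20200101000000"}
        }
    """
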
def __init__(self, api, require_grobid=True, **kwargs):
eg_desc = kwargs.pop('editgroup_description', None) or "Files crawled from web using sandcrawler ingest tool"
eg_extra = kwargs.pop('editgroup_extra', dict())
eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFileResultImporter')
super().__init__(api,
editgroup_description=eg_desc,
editgroup_extra=eg_extra,
**kwargs)
self.default_link_rel = kwargs.get("default_link_rel", "web")
assert self.default_link_rel
self.default_mimetype = kwargs.get("default_mimetype", None)
self.do_updates = kwargs.get("do_updates", False)
self.require_grobid = require_grobid
if self.require_grobid:
print("Requiring GROBID status == 200")
else:
print("NOT checking GROBID success")
self.ingest_request_source_whitelist = [
'fatcat-changelog',
'fatcat-ingest-container',
]
if kwargs.get('skip_source_whitelist', False):
self.ingest_request_source_whitelist = []

    def want(self, row):
"""
Logic here probably needs work (TODO):
- Direct ingests via DOI from fatcat-changelog should probably go
through regardless of GROBID status
- We should filter/block things like single-page PDFs here
- public/anonymous submissions could require successful biblio-glutton
match, or some other sanity check on the fatcat side (eg, fuzzy title
match)
- handle the case of release_stage not being 'published'; if pre-print,
potentially create a new release.
The current logic is intentionally conservative as a first step.
"""
        if row.get('hit') is not True:
self.counts['skip-hit'] += 1
return False
source = row['request'].get('ingest_request_source')
if self.ingest_request_source_whitelist and source not in self.ingest_request_source_whitelist:
self.counts['skip-ingest_request_source'] += 1
return False
        if source and source.startswith('savepapernow'):
# never process async savepapernow requests
self.counts['skip-savepapernow'] += 1
return False
if not row.get('file_meta'):
self.counts['skip-file-meta'] += 1
return False
if self.require_grobid and row.get('grobid', {}).get('status_code') != 200:
self.counts['skip-grobid'] += 1
return False
return True

    def parse_record(self, row):
request = row['request']
fatcat = request.get('fatcat')
file_meta = row['file_meta']
# identify release by fatcat ident, or extid lookup, or biblio-glutton match
release_ident = None
if fatcat and fatcat.get('release_ident'):
release_ident = fatcat.get('release_ident')
elif request.get('ext_ids'):
# if no fatcat ident, try extids
for extid_type in ('doi', 'pmid', 'pmcid', 'arxiv'):
extid = request['ext_ids'].get(extid_type)
if not extid:
continue
try:
release = self.api.lookup_release(**{extid_type: extid})
except fatcat_openapi_client.rest.ApiException as err:
                    if err.status == 404:
                        continue
                    elif err.status == 400:
                        self.counts['warn-extid-invalid'] += 1
                        continue
                    else:
                        # unexpected API error; don't silently swallow it
                        raise err
release_ident = release.ident
break
        if not release_ident and row.get('grobid'):
            # fall back to a biblio-glutton match extracted during GROBID processing
            if row['grobid'].get('fatcat_ident'):
                release_ident = row['grobid']['fatcat_ident'].split('_')[-1]
if not release_ident:
self.counts['skip-release-not-found'] += 1
return None
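        # construct file URLs from the CDX capture: the original URL plus a
        # wayback replay URL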
cdx = row.get('cdx')
if not cdx:
# TODO: support archive.org hits?
self.counts['skip-no-cdx'] += 1
return None
url = make_rel_url(cdx['url'], self.default_link_rel)
if not url:
self.counts['skip-url'] += 1
return None
wayback = "https://web.archive.org/web/{}/{}".format(
cdx['datetime'],
cdx['url'])
urls = [url, ("webarchive", wayback)]
urls = [fatcat_openapi_client.FileUrl(rel=rel, url=url) for (rel, url) in urls]
fe = fatcat_openapi_client.FileEntity(
md5=file_meta['md5hex'],
sha1=file_meta['sha1hex'],
sha256=file_meta['sha256hex'],
size=file_meta['size_bytes'],
mimetype=file_meta['mimetype'] or self.default_mimetype,
release_ids=[release_ident],
urls=urls,
)
if fatcat and fatcat.get('edit_extra'):
fe.edit_extra = fatcat['edit_extra']
if request.get('ingest_request_source'):
if not fe.edit_extra:
fe.edit_extra = dict()
fe.edit_extra['ingest_request_source'] = request['ingest_request_source']
return fe

    def try_update(self, fe):
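        """
        Checks for an existing file entity with the same SHA-1. Returns True
        if a new entity should be created; returns False (incrementing a
        skip/exists counter) if one already exists, since updates to existing
        entities are not currently performed.
        """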
# lookup sha1, or create new entity
existing = None
try:
existing = self.api.lookup_file(sha1=fe.sha1)
except fatcat_openapi_client.rest.ApiException as err:
if err.status != 404:
raise err
if not existing:
return True
        if (fe.release_ids[0] in existing.release_ids) and existing.urls:
            # TODO: could still, in theory, update the existing entity with the new URL
            self.counts['exists'] += 1
            return False
        if not self.do_updates:
            self.counts['skip-update-disabled'] += 1
            return False
        # TODO: updates are not implemented yet, so skip even when do_updates is set
        self.counts['skip-update-disabled'] += 1
        return False

    def insert_batch(self, batch):
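        """
        Creates the whole batch under a single editgroup using the "auto
        batch" endpoint (accepted directly, in contrast to the submit-for-review
        flow used by SavePaperNowFileImporter).
        """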
self.api.create_file_auto_batch(fatcat_openapi_client.FileAutoBatch(
editgroup=fatcat_openapi_client.Editgroup(
description=self.editgroup_description,
extra=self.editgroup_extra),
entity_list=batch))


class SavePaperNowFileImporter(IngestFileResultImporter):
"""
This worker ingests from the same feed as IngestFileResultImporter, but
only imports files from anonymous save-paper-now requests, and "submits"
them for further human review (as opposed to accepting by default).
"""

    def __init__(self, api, submit_mode=True, **kwargs):
eg_desc = kwargs.pop('editgroup_description', None) or "Files crawled after a public 'Save Paper Now' request"
eg_extra = kwargs.pop('editgroup_extra', dict())
eg_extra['agent'] = eg_extra.get('agent', 'fatcat_tools.IngestFileSavePaperNow')
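        # SPN imports always require GROBID success, never update existing
        # entities, and (by default) submit for review rather than accept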
kwargs['submit_mode'] = submit_mode
kwargs['require_grobid'] = True
kwargs['do_updates'] = False
super().__init__(api,
editgroup_description=eg_desc,
editgroup_extra=eg_extra,
**kwargs)

    def want(self, row):
        source = row['request'].get('ingest_request_source')
        if not source or not source.startswith('savepapernow'):
            self.counts['skip-not-savepapernow'] += 1
            return False
        if row.get('hit') is not True:
self.counts['skip-hit'] += 1
return False
if not row.get('file_meta'):
self.counts['skip-file-meta'] += 1
return False
if self.require_grobid and row.get('grobid', {}).get('status_code') != 200:
self.counts['skip-grobid'] += 1
return False
return True

    def insert_batch(self, batch):
"""
Usually running in submit_mode, so we can't use auto_batch method
"""
if self.submit_mode:
eg = self.api.create_editgroup(fatcat_openapi_client.Editgroup(
description=self.editgroup_description,
extra=self.editgroup_extra))
for fe in batch:
self.api.create_file(eg.editgroup_id, fe)
self.api.update_editgroup(eg.editgroup_id, eg, submit=True)
else:
self.api.create_file_auto_batch(fatcat_openapi_client.FileAutoBatch(
editgroup=fatcat_openapi_client.Editgroup(
description=self.editgroup_description,
extra=self.editgroup_extra),
entity_list=batch))