import json

import pytest

from fatcat_tools.importers import IngestFileResultImporter, IngestWebResultImporter, JsonLinePusher
from fixtures import *
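
# the `api` fixture and the quick_eg() helper used below come from the shared
# fixtures module (star-imported above)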


@pytest.fixture(scope="function")
def ingest_importer(api):
    yield IngestFileResultImporter(api)


@pytest.fixture(scope="function")
def ingest_web_importer(api):
    yield IngestWebResultImporter(api)


# TODO: use API to check that entities actually created...


def test_ingest_importer_basic(ingest_importer):
    with open('tests/files/example_ingest.json', 'r') as f:
        JsonLinePusher(ingest_importer, f).run()


def test_ingest_importer(ingest_importer):
    last_index = ingest_importer.api.get_changelog(limit=1)[0].index
    with open('tests/files/example_ingest.json', 'r') as f:
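        # bezerk_mode skips checks for already-existing entities, so this first
        # pass should insert; the re-import below with bezerk_mode=False should
        # report 'exists' instead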
        ingest_importer.bezerk_mode = True
        counts = JsonLinePusher(ingest_importer, f).run()
    assert counts['insert'] == 1
    assert counts['exists'] == 0
    assert counts['skip'] == 1

    # fetch most recent editgroup
    change = ingest_importer.api.get_changelog_entry(index=last_index+1)
    eg = change.editgroup
    assert eg.description
    assert "crawled from web" in eg.description.lower()
    assert eg.extra['git_rev']
    assert "fatcat_tools.IngestFileResultImporter" in eg.extra['agent']

    # re-import; the previously-inserted file should now count as 'exists'
    with open('tests/files/example_ingest.json', 'r') as f:
        ingest_importer.reset()
        ingest_importer.bezerk_mode = False
        counts = JsonLinePusher(ingest_importer, f).run()
    assert counts['insert'] == 0
    assert counts['exists'] == 1
    assert counts['skip'] == 1


def test_ingest_importer_xml(ingest_importer):
    last_index = ingest_importer.api.get_changelog(limit=1)[0].index
    with open('tests/files/example_ingest_xml.json', 'r') as f:
        ingest_importer.bezerk_mode = True
        counts = JsonLinePusher(ingest_importer, f).run()
    print(counts)
    assert counts['insert'] == 1
    assert counts['exists'] == 0
    assert counts['skip'] == 0

    # fetch most recent editgroup
    change = ingest_importer.api.get_changelog_entry(index=last_index+1)
    eg = change.editgroup
    assert eg.description
    assert "crawled from web" in eg.description.lower()
    assert eg.extra['git_rev']
    assert "fatcat_tools.IngestFileResultImporter" in eg.extra['agent']

    # re-import; should be a no-op (entity already exists)
    with open('tests/files/example_ingest_xml.json', 'r') as f:
        ingest_importer.reset()
        ingest_importer.bezerk_mode = False
        counts = JsonLinePusher(ingest_importer, f).run()
    assert counts['insert'] == 0
    assert counts['exists'] == 1
    assert counts['skip'] == 0


def test_ingest_importer_web(ingest_web_importer):
    last_index = ingest_web_importer.api.get_changelog(limit=1)[0].index
    with open('tests/files/example_ingest_html.json', 'r') as f:
        ingest_web_importer.bezerk_mode = True
        counts = JsonLinePusher(ingest_web_importer, f).run()
    print(counts)
    assert counts['insert'] == 1
    assert counts['exists'] == 0
    assert counts['skip'] == 0

    # fetch most recent editgroup
    change = ingest_web_importer.api.get_changelog_entry(index=last_index+1)
    eg = change.editgroup
    assert eg.description
    assert "crawled from web" in eg.description.lower()
    assert eg.extra['git_rev']
    assert "fatcat_tools.IngestWebResultImporter" in eg.extra['agent']

    # re-import; should be a no-op (entity already exists)
    with open('tests/files/example_ingest_html.json', 'r') as f:
        ingest_web_importer.reset()
        ingest_web_importer.bezerk_mode = False
        counts = JsonLinePusher(ingest_web_importer, f).run()
    assert counts['insert'] == 0
    assert counts['exists'] == 1
    assert counts['skip'] == 0


def test_ingest_importer_stage(ingest_importer, api):
    """
    Tests that the ingest importer correctly handles release stage matching.
    """
    test_table = [
        dict(request_stage=None, release_stage=None, status="insert"),
        dict(request_stage="published", release_stage=None, status="insert"),
        dict(request_stage=None, release_stage="draft", status="insert"),
        dict(request_stage="published", release_stage="published", status="insert"),
        dict(request_stage="draft", release_stage="published", status="skip-release-stage"),
        dict(request_stage="published", release_stage="draft", status="skip-release-stage"),
    ]
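    # bezerk_mode (skip existing-entity checks) so the same example record can
    # be re-imported on every loop iteration without counting as 'exists'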
    ingest_importer.bezerk_mode = True
    with open('tests/files/example_ingest.json', 'r') as f:
        raw = json.loads(f.readline())
    for row in test_table:
        #print(row)

        # set dummy record stage
        eg = quick_eg(api)
        r1 = api.lookup_release(doi="10.123/abc")
        r1.release_stage = row['release_stage']
        api.update_release(eg.editgroup_id, r1.ident, r1)
        api.accept_editgroup(eg.editgroup_id)

        # set ingest request stage
        raw['request']['release_stage'] = row['request_stage']
        ingest_importer.reset()
        ingest_importer.push_record(raw)
        counts = ingest_importer.finish()
        print(counts)
        assert counts["total"] == 1
        assert counts[row['status']] == 1


def test_ingest_dict_parse(ingest_importer):
    with open('tests/files/example_ingest.json', 'r') as f:
        raw = json.loads(f.readline())
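        # parse_record converts a single ingest result row into a file entity;
        # check hashes, mimetype, size, and URLs against the example fixture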
        f = ingest_importer.parse_record(raw)
        assert f.sha1 == "00242a192acc258bdfdb151943419437f440c313"
        assert f.md5 == "f4de91152c7ab9fdc2a128f962faebff"
        assert f.mimetype == "application/pdf"
        assert f.size == 255629
        assert len(f.urls) == 2
        for u in f.urls:
            if u.rel == "web":
                assert u.url.startswith("http://journals.plos.org")
            if u.rel == "webarchive":
                assert u.url.startswith("https://web.archive.org/")
        assert len(f.release_ids) == 1


def test_ingest_dict_parse_old(ingest_importer):
    with open('tests/files/example_ingest.old.json', 'r') as f:
        raw = json.loads(f.readline())

        # ancient ingest requests had no type; skip them
        f = ingest_importer.parse_record(raw)
        assert f is None

        raw['request']['ingest_type'] = 'pdf'
        f = ingest_importer.parse_record(raw)
        assert f.sha1 == "00242a192acc258bdfdb151943419437f440c313"
        assert f.md5 == "f4de91152c7ab9fdc2a128f962faebff"
        assert f.mimetype == "application/pdf"
        assert f.size == 255629
        assert len(f.urls) == 2
        for u in f.urls:
            if u.rel == "web":
                assert u.url.startswith("http://journals.plos.org")
            if u.rel == "webarchive":
                assert u.url.startswith("https://web.archive.org/")
        assert len(f.release_ids) == 1