blob: 80bcfa5a20dfae35bac2fe5e647476b3c2b38cd6 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
|
"""
A bunch of helpers to parse and normalize strings: external identifiers,
free-form input, titles, etc.
"""
import re
# Raw string and escaped dot: the old pattern ("^10.\d{3,6}/\S+$") emitted
# invalid-escape warnings and let '.' match any character. Behavior is
# unchanged because clean_doi() independently requires a "10." prefix.
DOI_REGEX = re.compile(r"^10\.\d{3,6}/\S+$")
def clean_doi(raw):
    """
    Normalize a DOI string.

    Removes any:
    - padding whitespace
    - 'doi:' prefix
    - URL prefix (http/https, doi.org, dx.doi.org)
    Does not try to un-URL-encode.
    Returns None if not a valid DOI.
    """
    raw = raw.strip()
    # Embedded whitespace means this can't be a single DOI
    if len(raw.split()) != 1:
        return None
    if raw.startswith("doi:"):
        raw = raw[4:]
    # Strip URL prefixes one layer at a time (scheme, then host)
    if raw.startswith("http://"):
        raw = raw[7:]
    if raw.startswith("https://"):
        raw = raw[8:]
    if raw.startswith("doi.org/"):
        raw = raw[8:]
    if raw.startswith("dx.doi.org/"):
        raw = raw[11:]
    if not raw.startswith("10."):
        return None
    if not DOI_REGEX.fullmatch(raw):
        return None
    return raw
def test_clean_doi():
    """Spot-check clean_doi() prefix stripping and rejection of bad input."""
    assert clean_doi("10.1234/asdf ") == "10.1234/asdf"
    assert clean_doi("http://doi.org/10.1234/asdf ") == "10.1234/asdf"
    assert clean_doi("https://dx.doi.org/10.1234/asdf ") == "10.1234/asdf"
    assert clean_doi("doi:10.1234/asdf ") == "10.1234/asdf"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_doi("doi:10.1234/ asdf ") is None
# Raw string, and the separator dot in the "new-style" (YYMM.NNNNN) branch is
# now escaped; the old pattern's bare '.' let any character separate the two
# digit groups (e.g. "0806x2878v1" was accepted).
ARXIV_ID_REGEX = re.compile(r"^(\d{4}\.\d{4,5}|[a-z\-]+(\.[A-Z]{2})?/\d{7})(v\d+)?$")
def clean_arxiv_id(raw):
    """
    Normalize an arxiv identifier.

    Removes any:
    - padding whitespace
    - 'arxiv:' prefix (case-insensitive)
    - 'https://arxiv.org/abs/' URL prefix
    Works with versioned or un-versioned arxiv identifiers, in both the
    new-style (YYMM.NNNNN) and old-style (archive/YYMMNNN) formats.
    Returns None if not a valid arxiv id.
    """
    raw = raw.strip()
    if raw.lower().startswith("arxiv:"):
        raw = raw[6:]
    if raw.lower().startswith("https://arxiv.org/abs/"):
        raw = raw[22:]
    if not ARXIV_ID_REGEX.fullmatch(raw):
        return None
    return raw
def test_clean_arxiv_id():
    """Spot-check clean_arxiv_id() on new-style, old-style, and bad ids."""
    assert clean_arxiv_id("0806.2878v1") == "0806.2878v1"
    assert clean_arxiv_id("0806.2878") == "0806.2878"
    assert clean_arxiv_id("1501.00001v1") == "1501.00001v1"
    assert clean_arxiv_id("1501.00001") == "1501.00001"
    assert clean_arxiv_id("hep-th/9901001v1") == "hep-th/9901001v1"
    assert clean_arxiv_id("hep-th/9901001") == "hep-th/9901001"
    assert clean_arxiv_id("math.CA/0611800v2") == "math.CA/0611800v2"
    assert clean_arxiv_id("math.CA/0611800") == "math.CA/0611800"
    assert clean_arxiv_id("0806.2878v1 ") == "0806.2878v1"
    assert clean_arxiv_id("https://arxiv.org/abs/0806.2878v1") == "0806.2878v1"
    assert clean_arxiv_id("arxiv:0806.2878v1") == "0806.2878v1"
    assert clean_arxiv_id("arXiv:0806.2878v1") == "0806.2878v1"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_arxiv_id("hep-TH/9901001v1") is None
    assert clean_arxiv_id("hßp-th/9901001v1") is None
    assert clean_arxiv_id("math.CA/06l1800v2") is None
    assert clean_arxiv_id("mßth.ca/0611800v2") is None
    assert clean_arxiv_id("MATH.CA/0611800v2") is None
    assert clean_arxiv_id("0806.2878v23") == "0806.2878v23" # ?
    assert clean_arxiv_id("0806.2878v") is None
    assert clean_arxiv_id("0806.2878") == "0806.2878"
    assert clean_arxiv_id("006.2878v1") is None
    assert clean_arxiv_id("0806.v1") is None
    assert clean_arxiv_id("08062878v1") is None
# ASCII-only digit class: the previous str.isdigit() check also accepted
# non-ASCII Unicode decimal digits (e.g. '٣'), which are not valid in PMCIDs.
PMCID_REGEX = re.compile(r"^PMC[0-9]+$")
def clean_pmcid(raw):
    """
    Normalize a PubMed Central identifier: 'PMC' followed by ASCII digits.

    Removes padding whitespace; returns None if not a valid-looking PMCID.
    """
    raw = raw.strip()
    # Embedded whitespace means this can't be a single identifier
    if len(raw.split()) != 1:
        return None
    if PMCID_REGEX.fullmatch(raw):
        return raw
    return None
def clean_sha1(raw):
    """
    Normalize a hex-encoded SHA-1: strips padding and lowercases.

    Returns None unless the result is exactly 40 lowercase hex characters.
    """
    candidate = raw.strip().lower()
    # Reject embedded whitespace, then enforce length and hex alphabet
    if len(candidate.split()) != 1:
        return None
    if len(candidate) != 40:
        return None
    if any(ch not in "0123456789abcdef" for ch in candidate):
        return None
    return candidate
def test_clean_sha1():
    """Spot-check clean_sha1() on valid, short, and non-hex input."""
    assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b"
    assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b ") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_sha1("fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
    assert clean_sha1("qfba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
    assert clean_sha1("0fba3fb a0e1937aa0297de3836b768b5dfb23d7b") is None
def clean_sha256(raw):
    """
    Normalize a hex-encoded SHA-256: strips padding and lowercases.

    Returns None unless the result is exactly 64 lowercase hex characters.
    """
    candidate = raw.strip().lower()
    # Reject embedded whitespace, then enforce length and hex alphabet
    if len(candidate.split()) != 1:
        return None
    if len(candidate) != 64:
        return None
    if any(ch not in "0123456789abcdef" for ch in candidate):
        return None
    return candidate
def test_clean_sha256():
    """Spot-check clean_sha256(); a 40-char SHA-1 must be rejected."""
    assert clean_sha256("6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f") == "6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_sha256("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
# Raw string: the old non-raw literal relied on "\d" surviving as an invalid
# escape sequence (a warning in modern Python). Pattern is unchanged.
ISSN_REGEX = re.compile(r"^\d{4}-\d{3}[0-9X]$")
def clean_issn(raw):
    """
    Normalize an ISSN ('NNNN-NNNC' where the check character may be 'X').

    Strips padding whitespace and uppercases (so a lowercase 'x' check
    character is accepted). Returns None if not a valid ISSN.
    """
    raw = raw.strip().upper()
    # Cheap early-out; the regex also enforces exactly nine characters
    if len(raw) != 9:
        return None
    if not ISSN_REGEX.fullmatch(raw):
        return None
    return raw
def test_clean_issn():
    """Spot-check clean_issn() on valid and malformed ISSNs."""
    assert clean_issn("1234-4567") == "1234-4567"
    assert clean_issn("1234-456X") == "1234-456X"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_issn("134-4567") is None
    assert clean_issn("123X-4567") is None
# Raw string: avoids invalid "\d" escape warnings. Pattern is unchanged.
ISBN13_REGEX = re.compile(r"^97(?:8|9)-\d{1,5}-\d{1,7}-\d{1,6}-\d$")
def clean_isbn13(raw):
    """
    Normalize a hyphenated ISBN-13 (978-/979- prefix required).

    Strips padding whitespace only; un-hyphenated forms are rejected.
    Returns None if not a valid-looking ISBN-13.
    """
    raw = raw.strip()
    if not ISBN13_REGEX.fullmatch(raw):
        return None
    return raw
def test_clean_isbn13():
    """Spot-check clean_isbn13(); un-hyphenated forms must be rejected."""
    assert clean_isbn13("978-1-56619-909-4") == "978-1-56619-909-4"
    assert clean_isbn13("978-1-4028-9462-6") == "978-1-4028-9462-6"
    assert clean_isbn13("978-1-56619-909-4 ") == "978-1-56619-909-4"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_isbn13("9781566199094") is None
# Raw string: avoids invalid "\d" escape warnings. Pattern is unchanged.
ORCID_REGEX = re.compile(r"^\d{4}-\d{4}-\d{4}-\d{3}[\dX]$")
def clean_orcid(raw):
    """
    Normalize an ORCID iD: four hyphenated groups of four, where the final
    check character may be 'X' (uppercase only).

    Strips padding whitespace only; returns None if not a valid ORCID.
    """
    raw = raw.strip()
    if not ORCID_REGEX.fullmatch(raw):
        return None
    return raw
def test_clean_orcid():
    """Spot-check clean_orcid() on valid, mis-hyphenated, and non-digit input."""
    assert clean_orcid("0123-4567-3456-6789") == "0123-4567-3456-6789"
    assert clean_orcid("0123-4567-3456-678X") == "0123-4567-3456-678X"
    assert clean_orcid("0123-4567-3456-6789 ") == "0123-4567-3456-6789"
    # `is None` rather than `== None` (PEP 8 E711)
    assert clean_orcid("01234567-3456-6780") is None
    assert clean_orcid("0x23-4567-3456-6780") is None
|