import base64
import datetime
import hashlib
from typing import Optional

import magic
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry  # pylint: disable=import-error
import urlcanon


def clean_url(s: str) -> str:
    s = s.strip()
    parsed = urlcanon.parse_url(s)
    if not parsed.port and parsed.colon_before_port:
        parsed.colon_before_port = b''
    return str(urlcanon.whatwg(parsed))
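
# Hedged usage sketch for clean_url(); the expected output assumes the
# urlcanon library's WHATWG canonicalization lower-cases the scheme and host,
# and is not verified against a specific urlcanon version.
def test_clean_url() -> None:
    assert clean_url(" HTTP://Example.COM:/thing ") == "http://example.com/thing"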

def url_fuzzy_equal(left: str, right: str) -> bool:
    """
    TODO: use proper surt library and canonicalization for this check
    """
    fuzzy_left = '://'.join(clean_url(left).replace('www.', '').replace(':80/', '/').split('://')[1:])
    fuzzy_right = '://'.join(clean_url(right).replace('www.', '').replace(':80/', '/').split('://')[1:])
    if fuzzy_left == fuzzy_right:
        return True
    elif fuzzy_left == fuzzy_right + "/" or fuzzy_right == fuzzy_left + "/":
        return True
    return False

def test_url_fuzzy_equal() -> None:
    assert url_fuzzy_equal(
        "http://www.annalsofian.org/article.asp?issn=0972-2327;year=2014;volume=17;issue=4;spage=463;epage=465;aulast=Nithyashree",
        "http://annalsofian.org/article.asp?issn=0972-2327;year=2014;volume=17;issue=4;spage=463;epage=465;aulast=Nithyashree")

def gen_file_metadata(blob: bytes, allow_empty: bool = False) -> dict:
    """
    Takes a file blob (bytestream) and returns hashes and other metadata.

    Returns a dict: size_bytes, md5hex, sha1hex, sha256hex, mimetype
    """
    assert blob is not None
    if not allow_empty:
        assert blob
    # sniff the mimetype from at most the first 1 MiB (identical result for
    # smaller blobs, and bounds work for large ones)
    mimetype = magic.Magic(mime=True).from_buffer(blob[:1024*1024])
    if mimetype in ("application/xml", "text/xml"):
        # crude checks for XHTML or JATS XML, using only first 1 kB of file
        if b"<htm" in blob[:1024] and b'xmlns="http://www.w3.org/1999/xhtml"' in blob[:1024]:
            mimetype = "application/xhtml+xml"
        elif b"<article " in blob[:1024] and not b"<html" in blob[:1024]:
            mimetype = "application/jats+xml"
    hashes = [
        hashlib.sha1(),
        hashlib.sha256(),
        hashlib.md5(),
    ]
    for h in hashes:
        h.update(blob)
    return dict(
        size_bytes=len(blob),
        sha1hex=hashes[0].hexdigest(),
        sha256hex=hashes[1].hexdigest(),
        md5hex=hashes[2].hexdigest(),
        mimetype=mimetype,
    )
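
# Sketch of a check for gen_file_metadata(), using well-known digests of the
# ASCII bytes b"hello world"; the mimetype field is not asserted because it
# depends on the local libmagic database.
def test_gen_file_metadata() -> None:
    info = gen_file_metadata(b"hello world")
    assert info['size_bytes'] == 11
    assert info['md5hex'] == "5eb63bbbe01eeed093cb22bb8f5acdc3"
    assert info['sha1hex'] == "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"
    assert info['sha256hex'] == "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"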

def gen_file_metadata_path(path: str, allow_empty: bool = False) -> dict:
    """
    Variant of gen_file_metadata() which works with files on local disk
    """
    assert path is not None
    mimetype = magic.Magic(mime=True).from_file(path)
    if mimetype in ("application/xml", "text/xml"):
        with open(path, 'rb') as f:
            blob = f.read(1024)
        # crude checks for XHTML or JATS XML, using only first 1 kB of file
        if b"<htm" in blob and b'xmlns="http://www.w3.org/1999/xhtml"' in blob:
            mimetype = "application/xhtml+xml"
        elif b"<article " in blob and b"<html" not in blob:
            mimetype = "application/jats+xml"
    hashes = [
        hashlib.sha1(),
        hashlib.sha256(),
        hashlib.md5(),
    ]
    size_bytes = 0
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(1024*1024)
            if not chunk:
                break
            size_bytes += len(chunk)
            for h in hashes:
                h.update(chunk)
    if not allow_empty:
        assert size_bytes > 0
    return dict(
        size_bytes=size_bytes,
        sha1hex=hashes[0].hexdigest(),
        sha256hex=hashes[1].hexdigest(),
        md5hex=hashes[2].hexdigest(),
        mimetype=mimetype,
    )
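
# Hypothetical round-trip check for gen_file_metadata_path(), writing a
# temporary file to disk (a sketch, not part of the original test suite).
def test_gen_file_metadata_path() -> None:
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".txt") as f:
        f.write(b"hello world")
        f.flush()
        info = gen_file_metadata_path(f.name)
    assert info['size_bytes'] == 11
    assert info['sha1hex'] == "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"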

def b32_hex(s: str) -> str:
    """
    Converts a base32-encoded SHA-1 checksum into a hex-encoded one.

    base32 SHA-1 checksums are used by, eg, heritrix and in wayback CDX files.
    Strings which are already 40-character hex digests are passed through
    unchanged.
    """
    s = s.strip().split()[0].lower()
    if s.startswith("sha1:"):
        s = s[5:]
    if len(s) != 32:
        if len(s) == 40:
            return s
        raise ValueError("not a base-32 encoded SHA-1 hash: {}".format(s))
    return base64.b16encode(base64.b32decode(s.upper())).lower().decode('utf-8')
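
# Check for b32_hex() using the SHA-1 of the empty string, whose base32 form
# is ubiquitous in WARC/CDX tooling.
def test_b32_hex() -> None:
    assert b32_hex("sha1:3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ") == "da39a3ee5e6b4b0d3255bfef95601890afd80709"
    # a 40-character input is assumed to already be hex and is passed through
    assert b32_hex("da39a3ee5e6b4b0d3255bfef95601890afd80709") == "da39a3ee5e6b4b0d3255bfef95601890afd80709"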

NORMAL_MIME = (
    'application/pdf',
    'application/postscript',
    'text/html',
    'text/xml',
    'application/octet-stream',
)

def normalize_mime(raw: str) -> Optional[str]:
    raw = raw.lower().strip()
    for norm in NORMAL_MIME:
        if raw.startswith(norm):
            return norm

    # Special cases
    if raw.startswith('application/xml'):
        return 'text/xml'
    if raw.startswith('application/x-pdf'):
        return 'application/pdf'
    if raw == '.pdf':
        return 'application/pdf'
    if raw in (
            'application/download',
            'binary/octet-stream',
            'unk',
            'application/x-download',
            'application/octetstream',
            'application/force-download',
            'application/unknown',
            ):
        return 'application/octet-stream'
    return None


def test_normalize_mime() -> None:
    assert normalize_mime("asdf") is None
    assert normalize_mime("application/pdf") == "application/pdf"
    assert normalize_mime("application/pdf+journal") == "application/pdf"
    assert normalize_mime("Application/PDF") == "application/pdf"
    assert normalize_mime("application/p") is None
    assert normalize_mime("application/xml+stuff") == "text/xml"
    assert normalize_mime("application/x-pdf") == "application/pdf"
    assert normalize_mime("application/x-html") is None
    assert normalize_mime("unk") == "application/octet-stream"
    assert normalize_mime("binary/octet-stream") == "application/octet-stream"


def parse_cdx_line(raw_cdx: str, normalize: bool = True) -> Optional[dict]:
    """
    Parses a single whitespace-separated CDX-11 line into a dict, returning
    None if the line is not valid.

    This method always filters a few things out:

    - non-HTTP requests, based on lack of status code (eg, whois)
    """

    cdx = raw_cdx.split()
    if len(cdx) < 11:
        return None

    # CDX-11 columns: SURT, datetime, URL, mimetype, HTTP status, SHA-1
    # (base32), redirect, meta tags, compressed size, WARC offset, WARC path;
    # the redirect and meta tag columns are skipped
    surt = cdx[0]
    dt = cdx[1]
    url = cdx[2]
    mime = cdx[3]
    http_status = cdx[4]
    sha1b32 = cdx[5]
    c_size = cdx[8]
    offset = cdx[9]
    warc = cdx[10]

    if not (sha1b32.isalnum() and c_size.isdigit() and offset.isdigit()
            and len(sha1b32) == 32 and dt.isdigit()):
        return None

    if '-' in (surt, dt, url, http_status, sha1b32, c_size, offset, warc):
        return None

    if mime == '-':
        mime = "application/octet-stream"

    if normalize:
        # normalize_mime() returns None for unknown types; fall back to a
        # generic binary mimetype rather than dropping the row
        mime = normalize_mime(mime) or "application/octet-stream"

    sha1hex = b32_hex(sha1b32)

    return dict(
        surt=surt,
        url=url,
        datetime=dt,
        mimetype=mime,
        http_status=int(http_status),
        sha1b32=sha1b32,
        sha1hex=sha1hex,
        warc_csize=int(c_size),
        warc_offset=int(offset),
        warc_path=warc,
    )
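
# Sketch of a parse_cdx_line() check against a synthetic CDX-11 line; the
# SURT, WARC filename, size, and offset are made-up illustrative values.
def test_parse_cdx_line() -> None:
    raw = "com,example)/thing.pdf 20201028235103 http://example.com/thing.pdf application/pdf 200 3I42H3S6NNFQ2MSVX7XZKYAYSCX5QBYJ - - 1234 5678 EXAMPLE-2020-00000.warc.gz"
    parsed = parse_cdx_line(raw)
    assert parsed is not None
    assert parsed['http_status'] == 200
    assert parsed['mimetype'] == "application/pdf"
    assert parsed['warc_csize'] == 1234
    assert parsed['warc_offset'] == 5678
    assert parsed['sha1hex'] == "da39a3ee5e6b4b0d3255bfef95601890afd80709"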

def parse_cdx_datetime(dt_str: str) -> Optional[datetime.datetime]:
    if not dt_str:
        return None
    try:
        return datetime.datetime.strptime(dt_str, "%Y%m%d%H%M%S")
    except Exception:
        return None

def test_parse_cdx_datetime() -> None:
    assert parse_cdx_datetime("") is None
    assert parse_cdx_datetime("asdf") is None
    assert parse_cdx_datetime("19930203123045") is not None
    assert parse_cdx_datetime("20201028235103") == datetime.datetime(year=2020, month=10, day=28, hour=23, minute=51, second=3)

def datetime_to_cdx(dt: datetime.datetime) -> str:
    return '%04d%02d%02d%02d%02d%02d' % (
        dt.year, dt.month, dt.day,
        dt.hour, dt.minute, dt.second,
    )

def test_datetime_to_cdx() -> None:
    assert "20201028235103" == datetime_to_cdx(datetime.datetime(year=2020, month=10, day=28, hour=23, minute=51, second=3))

def requests_retry_session(retries: int = 10, backoff_factor: float = 3,
        status_forcelist=(500, 502, 504), session=None) -> requests.Session:
    """
    From: https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    """
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
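
# Minimal usage sketch: build a session with retries mounted and confirm the
# Retry configuration took effect (the example.com URL is illustrative only).
def test_requests_retry_session() -> None:
    session = requests_retry_session(retries=2)
    adapter = session.get_adapter("https://example.com/")
    assert adapter.max_retries.total == 2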