import copy
import pytest

from fatcat_tools.cleanups import FileCleaner
from fatcat_openapi_client import FileEntity, FileUrl
from fixtures import *


@pytest.fixture(scope="function")
def file_cleaner(api):
    yield FileCleaner(api)

def test_url_cleanups(file_cleaner):
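    """
    Check FileUrl cleanups: dropping broken 'web/None' wayback links,
    re-labeling archive.org 'repository' links as 'archive', and removing
    short-timestamp wayback captures that duplicate a full-timestamp one.
    """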

    f = FileEntity(
        sha1="027e7ed3ea1a40e92dd2657a1e3c992b5dc45dd2",
        urls=[],
    )

    f.urls = [
        FileUrl(url="https://web.archive.org/web/12345542/something.com/blah.pdf", rel="webarchive"),
        FileUrl(url="https://web.archive.org/web/None/something.com/blah.pdf", rel="webarchive"),
        FileUrl(url="https://archive.org/details/None/something.com/blah.pdf", rel="repository"),
    ]
    f = file_cleaner.clean_entity(f)

    # remove None wayback links
    assert len(f.urls) == 2
    for u in f.urls:
        assert 'web/None' not in u.url

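    # cleaning should be idempotent, including on a deep copy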
    assert f == file_cleaner.clean_entity(f)
    assert f == file_cleaner.clean_entity(copy.deepcopy(f))

    # rel=repository -> rel=archive for archive.org links
    assert f.urls[1].rel == 'archive'

    # short/partial wayback timestamps are dropped when a full-timestamp capture of the same URL exists
    f.urls = [
        FileUrl(url="http://web.archive.org/web/20181031120933/https://www.jstage.jst.go.jp/article/jsci1978/1/1/1_1_231/_pdf", rel="webarchive"),
        FileUrl(url="http://web.archive.org/web/2018/https://www.jstage.jst.go.jp/article/jsci1978/1/1/1_1_231/_pdf", rel="webarchive"),
    ]
    f = file_cleaner.clean_entity(f)
    assert len(f.urls) == 1
    assert f.urls[0].url == 'http://web.archive.org/web/20181031120933/https://www.jstage.jst.go.jp/article/jsci1978/1/1/1_1_231/_pdf'

    assert f == file_cleaner.clean_entity(f)
    assert f == file_cleaner.clean_entity(copy.deepcopy(f))

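    # a short-timestamp wayback URL is kept when it is the only URL for the file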
    f.urls = [
        FileUrl(url="http://web.archive.org/web/2018/https://www.jstage.jst.go.jp/article/jsci1978/1/1/1_1_231/_pdf", rel="webarchive"),
    ]
    f = file_cleaner.clean_entity(f)
    assert len(f.urls) == 1
    assert f.urls[0].url == 'http://web.archive.org/web/2018/https://www.jstage.jst.go.jp/article/jsci1978/1/1/1_1_231/_pdf'

    assert f == file_cleaner.clean_entity(f)
    assert f == file_cleaner.clean_entity(copy.deepcopy(f))
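
# For reference, a minimal sketch of the cleanup rules exercised above. This is an
# assumption reconstructed from the assertions in this test, NOT the actual
# FileCleaner.clean_entity() implementation; the function name and structure are hypothetical.
def _sketch_clean_file_urls(urls):
    """Hypothetical sketch: drop wayback links with a 'None' timestamp, drop
    short-timestamp wayback captures when a full 14-digit capture of the same
    original URL is present (keep them if they are the only copy), and re-label
    archive.org 'repository' links as 'archive'."""
    wayback_prefixes = ("http://web.archive.org/web/", "https://web.archive.org/web/")
    # original URLs for which a full 14-digit wayback timestamp already exists
    full_captures = set()
    for u in urls:
        if u.url.startswith(wayback_prefixes):
            timestamp, _, original = u.url.split("/web/", 1)[1].partition("/")
            if len(timestamp) == 14 and timestamp.isdigit():
                full_captures.add(original)
    cleaned = []
    for u in urls:
        if "/web/None/" in u.url:
            # broken wayback link with no capture timestamp
            continue
        if u.url.startswith(wayback_prefixes):
            timestamp, _, original = u.url.split("/web/", 1)[1].partition("/")
            if not (len(timestamp) == 14 and timestamp.isdigit()) and original in full_captures:
                # short/partial timestamp, and a full capture of the same URL already exists
                continue
        if u.url.startswith(("http://archive.org/", "https://archive.org/")) and u.rel == "repository":
            u.rel = "archive"
        cleaned.append(u)
    return cleaned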