aboutsummaryrefslogtreecommitdiffstats
path: root/python/tests/transform_tests.py
blob: 7b583ac43b38745b137022876ae0dea78e6ef93d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106

import json
import pytest
from fatcat_tools import *
from fatcat_openapi_client import *
from fixtures import api
from import_journal_metadata import journal_metadata_importer

from import_crossref import crossref_importer
from import_matched import matched_importer

def test_basic_elasticsearch_convert(crossref_importer):
    """Parse a single crossref work fixture and run it through the
    elasticsearch transform; passes if no exception is raised."""
    # Fixture is a pretty-printed JSON document, not line-delimited,
    # so the whole file is parsed at once.
    with open('tests/files/crossref-works.single.json', 'r') as in_file:
        record = json.load(in_file)
        release = crossref_importer.parse_record(record)
    release.state = 'active'
    release_to_elasticsearch(release)

def test_rich_elasticsearch_convert():
    """Build a release with container and file metadata by hand and check
    the derived elasticsearch flags and reference counts.

    Fix: the original asserted ``es['in_ia'] == True`` twice (once before
    ``in_jstor`` and again after ``in_ia_sim``); the duplicate is removed.
    Comparisons to True/False are replaced with bare truthiness asserts
    (flake8 E712 / pytest idiom).
    """
    r = ReleaseEntity(
        title="something",
        release_year=1234,
        license_slug="CC-BY-NC",
        ext_ids=ReleaseExtIds(),
        refs=[
            # one bare ref and one linked ref -> ref_count 2, linked 1
            ReleaseRef(),
            ReleaseRef(target_release_id="iznnn644szdwva7khyxqzc73bi"),
        ],
    )
    r.state = 'active'
    r.container = ContainerEntity(
        name="dummy journal",
        extra={
            # release_year (1234) falls outside all of these year spans,
            # so the sim/jstor preservation flags should come back false
            "ia": {
                "sim": {
                    "year_spans": [[1000, 1100]],
                },
            },
            "kbart": {
                "lockss": {
                    "year_spans": [[1200, 1300]],
                },
                "jstor": {
                    "year_spans": [[1950, 1960], [1980, 2005]],
                },
            },
            "sherpa_romeo": {"color": "blue"},
            "doaj": {"as_of": "2010-02-03"},
        },
    )
    r.files = [FileEntity(
        mimetype="application/pdf",
        urls=[
            FileUrl(rel="dweb", url="dat://a954329dlk/thingie"),
            FileUrl(rel="webarchive", url="https://web.archive.org/web/20001122030405/http://example.com"),
            FileUrl(rel="web", url="https://archive.org/details/blah/file.pdf"),
        ],
        extra={
            "shadows": {},
        },
    )]
    es = release_to_elasticsearch(r)
    assert es['release_year'] == r.release_year
    assert not es['in_jstor']
    assert not es['in_ia_sim']
    assert es['in_ia']
    assert es['in_web']
    assert es['in_dweb']
    assert es['is_oa']
    assert not es['is_longtail_oa']
    assert es['ref_count'] == 2
    assert es['ref_linked_count'] == 1

def test_elasticsearch_release_from_json():
    """Round-trip a canned release JSON fixture through the elasticsearch
    transform; passes if no exception is raised.

    Fix: the original opened the fixture file inline and never closed it
    (resource leak); a context manager now guarantees the handle is closed.
    """
    with open('./tests/files/math_universe.json', 'r') as f:
        r = entity_from_json(f.read(), ReleaseEntity)
    release_to_elasticsearch(r)

def test_elasticsearch_container_transform(journal_metadata_importer):
    """Parse the first line-delimited journal metadata record and verify
    the elasticsearch document carries the publisher field through."""
    with open('tests/files/journal_metadata.sample.json', 'r') as sample:
        first_line = sample.readline()
        container = journal_metadata_importer.parse_record(json.loads(first_line))
    container.state = 'active'
    doc = container_to_elasticsearch(container)
    assert doc['publisher'] == container.publisher

def test_elasticsearch_file_transform(matched_importer):
    """Parse a matched-file record and verify the elasticsearch document
    mirrors the file entity's hash, size, mimetype, and access fields.

    Fix: the original bound the parsed record to ``f``, shadowing the open
    file handle from the ``with`` statement; the record now gets its own
    name. ``== True`` replaced with a bare truthiness assert (E712).
    """
    with open('tests/files/example_matched.json', 'r') as in_file:
        raw = json.loads(in_file.readline())
        file_entity = matched_importer.parse_record(raw)

    file_entity.state = 'active'
    es = file_to_elasticsearch(file_entity)
    assert es['sha1'] == file_entity.sha1
    assert es['sha256'] == file_entity.sha256
    assert es['md5'] == file_entity.md5
    assert es['size_bytes'] == file_entity.size
    assert es['mimetype'] == file_entity.mimetype
    assert es['in_ia']
    assert 'publisher' in es['rel']

    # XXX: implement hosts and domain parsing with urlcanon
    #assert 'journals.plos.org' in es['host']
    #assert 'plos.org' in es['domain']