import re
import sys
import urllib.parse

from bs4 import BeautifulSoup

RESEARCHSQUARE_REGEX = re.compile(r'"url":"(https://assets\.researchsquare\.com/files/.{1,50}/v\d+/Manuscript\.pdf)"')
IEEEXPLORE_REGEX = re.compile(r'"pdfPath":"(/.*?\.pdf)"')
OVID_JOURNAL_URL_REGEX = re.compile(r'journalURL = "(http.*?)";')

def test_regex():
    lines = """
    blah
    var journalURL = "https://journals.lww.com/co-urology/fulltext/10.1097/MOU.0000000000000689";
    asdf"""
    m = OVID_JOURNAL_URL_REGEX.search(lines)
    assert m.group(1) == "https://journals.lww.com/co-urology/fulltext/10.1097/MOU.0000000000000689"
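
# Analogous spot-checks for the other two regexes, built from the example
# strings quoted in comments further down in this file:
def test_researchsquare_regex():
    lines = '"url":"https://assets.researchsquare.com/files/4a57970e-b002-4608-b507-b95967649483/v2/Manuscript.pdf"'
    m = RESEARCHSQUARE_REGEX.search(lines)
    assert m.group(1) == "https://assets.researchsquare.com/files/4a57970e-b002-4608-b507-b95967649483/v2/Manuscript.pdf"

def test_ieeexplore_regex():
    lines = '"pdfPath":"/iel7/6287639/8600701/08730316.pdf",'
    m = IEEEXPLORE_REGEX.search(lines)
    assert m.group(1) == "/iel7/6287639/8600701/08730316.pdf"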

def extract_fulltext_url(html_url, html_body):
    """
    Takes an HTML document (as raw bytes, plus the URL it was fetched from),
    assumed to be a landing page, and tries to find a fulltext PDF url.

    Returns a dict which may include 'pdf_url' (a direct PDF link),
    'next_url' (a URL to crawl next), 'release_stage', and 'technique' (which
    heuristic matched); returns an empty dict if nothing was found.
    """

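    # scheme plus host, e.g. "https://journals.sagepub.com/doi/10.1177/2309499019888836"
    # -> "https://journals.sagepub.com"; used to absolutize relative PDF links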
    host_prefix = '/'.join(html_url.split('/')[:3])
    soup = BeautifulSoup(html_body, 'html.parser')

    ### General Tricks ###

    # highwire-style meta tag
    meta = soup.find('meta', attrs={"name":"citation_pdf_url"})
    if not meta:
        meta = soup.find('meta', attrs={"name":"bepress_citation_pdf_url"})
    # wiley has a weird almost-blank page we don't want to loop on
    if meta and "://onlinelibrary.wiley.com/doi/pdf/" not in html_url:
        url = (meta.get('content') or '').strip()
        if url.startswith('/'):
            return dict(pdf_url=host_prefix+url, technique='citation_pdf_url')
        elif url.startswith('http'):
            return dict(pdf_url=url, technique='citation_pdf_url')
        else:
            print("malformed citation_pdf_url? {}".format(url), file=sys.stderr)

    # sage, and also utpjournals (see below)
    # https://journals.sagepub.com/doi/10.1177/2309499019888836
    # <a href="http://journals.sagepub.com/doi/pdf/10.1177/2309499019888836" class="show-pdf" target="_self">
    # <a href="http://utpjournals.press/doi/pdf/10.3138/cjh.ach.54.1-2.05" class="show-pdf" target="_blank">
    href = soup.find('a', attrs={"class":"show-pdf"})
    if href:
        url = href['href'].strip()
        if url.startswith('http'):
            return dict(pdf_url=url, technique='href_show-pdf')

    # ACS (and probably others) like:
    #   https://pubs.acs.org/doi/10.1021/acs.estlett.9b00379
    #   <a href="/doi/pdf/10.1021/acs.estlett.9b00379" title="PDF" target="_blank" class="button_primary"><i class="icon-file-pdf-o"></i><span>PDF (1 MB)</span></a>
    href = soup.find('a', attrs={"title":"PDF"})
    if href:
        url = href['href'].strip()
        if url.startswith('http'):
            return dict(pdf_url=url, technique='href_title')
        elif url.startswith('/'):
            return dict(pdf_url=host_prefix+url, technique='href_title')

    # http://www.jasstudies.com/DergiTamDetay.aspx?ID=3401
    # <embed src="/files/jass_makaleler/1359848334_33-Okt.%20Yasemin%20KARADEM%C4%B0R.pdf" type="application/pdf" />
    embed = soup.find('embed', attrs={"type": "application/pdf"})
    if embed:
        url = embed['src'].strip()
        if url.startswith('/'):
            url = host_prefix+url
        if url.startswith('http'):
            return dict(pdf_url=url, technique='embed_type')

    ### Publisher/Platform Specific ###

    # eLife (elifesciences.org)
    if '://elifesciences.org/articles/' in html_url:
        anchor = soup.find("a", attrs={"data-download-type": "pdf-article"})
        if anchor:
            url = anchor['href'].strip()
            assert '.pdf' in url
            return dict(pdf_url=url, technique='publisher')

    # research square (researchsquare.com)
    if 'researchsquare.com/article/' in html_url:
        # JSON in body with a field like:
        # "url":"https://assets.researchsquare.com/files/4a57970e-b002-4608-b507-b95967649483/v2/Manuscript.pdf"
        m = RESEARCHSQUARE_REGEX.search(html_body.decode('utf-8'))
        if m:
            url = m.group(1)
            assert len(url) < 1024
            return dict(release_stage="manuscript", pdf_url=url, technique='publisher')

    # elsevier linking hub
    # https://linkinghub.elsevier.com/retrieve/pii/S1569199319308975
    if '://linkinghub.elsevier.com/retrieve/pii/' in html_url:
        # <input type="hidden" name="redirectURL" value="http%3A%2F%2Fcysticfibrosisjournal.com%2Fretrieve%2Fpii%2FS1569199319308975" id="redirectURL"/>
        redirect = soup.find("input", attrs={"name": "redirectURL"})
        if redirect:
            url = redirect['value'].strip()
            if 'http' in url:
                url = urllib.parse.unquote(url)
                # drop any '?via...' tracking query parameter
                url = url.split('?via')[0]
                return dict(next_url=url, technique="elsevier-linkinghub")

    # ieeexplore.ieee.org
    # https://ieeexplore.ieee.org/document/8730316
    if '://ieeexplore.ieee.org/document/' in html_url:
        # JSON in body with a field like:
        # "pdfPath":"/iel7/6287639/8600701/08730316.pdf",
        m = IEEEXPLORE_REGEX.search(html_body.decode('utf-8'))
        if m:
            url = m.group(1)
            assert len(url) < 1024
            return dict(release_stage="published", pdf_url=host_prefix+url, technique="publisher")
    # https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8730313
    if '://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber' in html_url:
        # HTML iframe like:
        # <iframe src="http://web.archive.org/web/20191026011528if_/https://ieeexplore.ieee.org/ielx7/6287639/8600701/08730313.pdf?tp=&amp;arnumber=8730313&amp;isnumber=8600701&amp;ref=" frameborder="0"></iframe>
        iframe = soup.find("iframe")
        if iframe and iframe.get('src') and '.pdf' in iframe['src']:
            return dict(pdf_url=iframe['src'], technique="iframe")

    # utpjournals.press
    # https://utpjournals.press/doi/10.3138/cjh.ach.54.1-2.05
    if '://utpjournals.press/doi/10.' in html_url:
        # <a href="http://utpjournals.press/doi/pdf/10.3138/cjh.ach.54.1-2.05" class="show-pdf" target="_blank">
        href = soup.find('a', attrs={"class":"show-pdf"})
        if href:
            url = href['href'].strip()
            if url.startswith('http'):
                return dict(pdf_url=url, technique='publisher-href')

    # https://www.jcancer.org/v10p4038.htm
    # simple journal-specific href
    if '://www.jcancer.org/' in html_url and html_url.endswith(".htm"):
        # <a href='v10p4038.pdf' class='textbutton'>PDF</a>
        href = soup.find('a', attrs={"class":"textbutton"})
        if href:
            url = href['href'].strip()
            if url.endswith(".pdf") and "http" not in url:
                return dict(pdf_url=host_prefix+"/"+url, technique='journal-href')

    # https://insights.ovid.com/crossref?an=00042307-202001000-00013
    # Ovid is some kind of landing page bounce portal tracking run-around.
    # Can extract actual journal URL from javascript blob in the HTML
    if '://insights.ovid.com/crossref' in html_url:
        # var journalURL = "https://journals.lww.com/co-urology/fulltext/10.1097/MOU.0000000000000689";
        m = OVID_JOURNAL_URL_REGEX.search(html_body.decode('utf-8'))
        if m:
            url = m.group(1)
            assert len(url) < 1024
            return dict(next_url=url, technique='ovid')

    # osf.io
    # https://osf.io/8phvx/
    # https://osf.io/preprints/socarxiv/8phvx/
    # wow, they ship total javascript crud! going to just guess download URL
    # based on URL for now. Maybe content type header would help?
    if '://osf.io/' in html_url and '/download' not in html_url:
        if not html_url.endswith("/"):
            next_url = html_url+"/download"
        else:
            next_url = html_url+"download"
        return dict(next_url=next_url, technique='osf-by-url')

    # wiley
    # https://onlinelibrary.wiley.com/doi/pdf/10.1111/1467-923X.12787
    if "://onlinelibrary.wiley.com/doi/pdf/" in html_url:
        # html_body is bytes, so compare against a bytes literal
        if b"/doi/pdfdirect/" in html_body:
            next_url = html_url.replace('/doi/pdf/', '/doi/pdfdirect/')
            return dict(next_url=next_url, technique='wiley-pdfdirect')

    # taylor and francis
    # https://www.tandfonline.com/doi/full/10.1080/19491247.2019.1682234
    # <a href="/doi/pdf/10.1080/19491247.2019.1682234?needAccess=true" class="show-pdf" target="_blank">
    if "://www.tandfonline.com/doi/full/10." in html_url:
        href = soup.find('a', attrs={"class":"show-pdf"})
        if href:
            url = href['href'].strip()
            if "/pdf/" in url:
                return dict(pdf_url=host_prefix+url, technique='publisher')

    return dict()
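
# Minimal usage sketch, assuming the `requests` library is available (it is
# not imported by this module). Note that html_body is passed as raw bytes:
#
#   import requests
#   resp = requests.get("https://journals.sagepub.com/doi/10.1177/2309499019888836")
#   result = extract_fulltext_url(resp.url, resp.content)
#   if result.get('pdf_url'):
#       print("direct PDF: {}".format(result['pdf_url']))
#   elif result.get('next_url'):
#       print("crawl next: {}".format(result['next_url']))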