path: root/python/fatcat_tools/harvest/pubmed.py
"""
Pubmed harvest via FTP.
"""

import collections
import gzip
import io
import re
import shutil
import sys
import tempfile
import time
import xml.etree.ElementTree as ET
from ftplib import FTP
from urllib.parse import urlparse

import dateparser
from bs4 import BeautifulSoup
from confluent_kafka import KafkaException, Producer

from .harvest_common import HarvestState


class PubmedFTPWorker:
    """
    Access Pubmed FTP host for daily updates.

    * Server directory: ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles
    * Docs: ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/README.txt

    Daily Update Files (02/2020)
    ----------------------------
    Each day, NLM produces update files that include new, revised and deleted
    citations. The first Update file to be loaded after loading the complete
    set of 2019 MEDLINE/PubMed Baseline files is pubmed20n1016.xml.

    Usually, three files per update, e.g.:

    * ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/pubmed20n1016_stats.html
    * ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/pubmed20n1016.xml.gz
    * ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/pubmed20n1016.xml.gz.md5

    The HTML contains the date.

        <html>
        <head><title></title></head>
        <body>
        <h4>Filename: pubmed20n1019.xml -- Created: Wed Dec 18 14:31:09 EST 2019</h4>
        <table cellspacing="0" cellpadding="0" border="0" width="300">
        <tr>

    When this worker starts, it will figure out a mapping from dates to XML
    files by looking at all the HTML files.
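
    Example usage (a sketch; the Kafka bootstrap servers and topic names shown
    here are placeholders, real values come from configuration):

        worker = PubmedFTPWorker(
            kafka_hosts="localhost:9092",
            produce_topic="fatcat-prod.pubmed-articles",
            state_topic="fatcat-prod.pubmed-state",
        )
        worker.run(continuous=True)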
    """
    def __init__(self, kafka_hosts, produce_topic, state_topic, start_date=None, end_date=None):
        self.name = 'Pubmed'
        self.host = 'ftp.ncbi.nlm.nih.gov'
        self.produce_topic = produce_topic
        self.state_topic = state_topic
        self.kafka_config = {
            'bootstrap.servers': kafka_hosts,
            'message.max.bytes': 20000000, # ~20 MBytes; broker is ~50 MBytes
        }
        self.loop_sleep = 60*60 # how long to wait, in seconds, between date checks
        self.state = HarvestState(start_date, end_date)
        self.state.initialize_from_kafka(self.state_topic, self.kafka_config)
        self.date_file_map = self.generate_date_file_map()
        if len(self.date_file_map) == 0:
            raise ValueError('mapping from dates to files should not be empty')

    def generate_date_file_map(self):
        """
        Generate a dictionary mapping date (strings) to filepaths. The date is
        parsed from pubmed20n1016_stats.html, mapping to absolute path on FTP,
        e.g. "2020-01-02": "/pubmed/updatefiles/pubmed20n1016.xml.gz".
        """
        mapping = collections.defaultdict(set)
        pattern = re.compile(r'Filename: ([^ ]*.xml) -- Created: ([^<]*)')
        ftp = FTP(self.host)
        ftp.login()
        filenames = ftp.nlst('/pubmed/updatefiles')

        for name in filenames:
            if not name.endswith('.html'):
                continue
            sio = io.StringIO()
            ftp.retrlines('RETR {}'.format(name), sio.write)
            contents = sio.getvalue()
            match = pattern.search(contents)
            if match is None:
                print('pattern miss on: {}, may need to adjust pattern: {}'.format(contents, pattern),
                      file=sys.stderr)
                continue
            filename, filedate = match.groups()  # ('pubmed20n1017.xml', 'Tue Dec 17 15:23:32 EST 2019')
            date = dateparser.parse(filedate)
            fullpath = '/pubmed/updatefiles/{}.gz'.format(filename)
            mapping[date.strftime('%Y-%m-%d')].add(fullpath)

        print('generated date-file mapping for {} dates'.format(len(mapping)), file=sys.stderr)
        return mapping


    def fetch_date(self, date):
        """
        Fetch file for a given date and feed Kafka one article per message.
        """
        def fail_fast(err, msg):
            if err is not None:
                print("Kafka producer delivery error: {}".format(err), file=sys.stderr)
                print("Bailing out...", file=sys.stderr)
                raise KafkaException(err)

        producer_conf = self.kafka_config.copy()
        producer_conf.update({
            'delivery.report.only.error': True,
            'default.topic.config': {
                'request.required.acks': -1, # all brokers must confirm
            },
        })
        producer = Producer(producer_conf)

        date_str = date.strftime('%Y-%m-%d')
        paths = self.date_file_map.get(date_str)
        if paths is None:
            print("WARN: no pubmed update for this date: {} (UTC), available dates were: {}".format(date_str, self.date_file_map), file=sys.stderr)
            return

        count = 0
        for path in paths:
            # Fetch the gzip-compressed update file, then decompress it to a
            # local temporary file before streaming the XML.
            filename = ftpretr("ftp://{}{}".format(self.host, path))
            with tempfile.NamedTemporaryFile(prefix='fatcat-ftp-tmp-', delete=False) as decomp:
                with gzip.open(filename, 'rb') as gzf:
                    shutil.copyfileobj(gzf, decomp)
            for blob in xmlstream(decomp.name, 'PubmedArticle', encoding='utf-8'):
                soup = BeautifulSoup(blob, 'xml')
                pmid = soup.find('PMID')
                if pmid is None:
                    raise ValueError('no PMID found, adjust identifier extraction')
                count += 1
                if count % 50 == 0:
                    print("... up to {} from {}".format(count, filename))
                producer.produce(
                    self.produce_topic,
                    blob,
                    key=pmid.text,
                    on_delivery=fail_fast)
            producer.flush()

    def run(self, continuous=False):
        while True:
            current = self.state.next(continuous)
            if current:
                print("Fetching DOIs updated on {} (UTC)".format(current))
                self.fetch_date(current)
                self.state.complete(current,
                    kafka_topic=self.state_topic,
                    kafka_config=self.kafka_config)
                continue

            if continuous:
                print("Sleeping {} seconds...".format(self.loop_sleep))
                time.sleep(self.loop_sleep)
            else:
                break
        print("{} DOI ingest caught up".format(self.name))


def ftpretr(url):
    """
    Fetch (RETR) a remote file given by its URL (e.g.
    "ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/pubmed20n1016.xml.gz") to a
    local temporary file.
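
    Usage sketch (the URL above is illustrative; the caller is responsible for
    removing the temporary file when done):

        filename = ftpretr("ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/pubmed20n1016.xml.gz")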
    """
    parsed = urlparse(url)
    server, path = parsed.netloc, parsed.path
    ftp = FTP(server)
    ftp.login()
    with tempfile.NamedTemporaryFile(prefix='fatcat-ftp-tmp-', delete=False) as f:
        ftp.retrbinary('RETR %s' % path, f.write)
    ftp.close()
    return f.name


def xmlstream(filename, tag, encoding='utf-8'):
    """
    Given a path to an XML file and a tag name (without namespace), stream
    through the XML, and emit the element denoted by tag for processing as string.

    for snippet in xmlstream("sample.xml", "sometag"):
        print(len(snippet))
    """
    def strip_ns(tag):
        if '}' not in tag:
            return tag
        return tag.split('}')[1]

    # https://stackoverflow.com/a/13261805, http://effbot.org/elementtree/iterparse.htm
    context = iter(ET.iterparse(filename, events=(
        'start',
        'end',
    )))
    _, root = next(context)

    for event, elem in context:
        if not strip_ns(elem.tag) == tag or event == 'start':
            continue

        yield ET.tostring(elem, encoding=encoding)
        root.clear()