# fatcat_scholar/sim_pipeline.py
import io
import sys
import sqlite3
import argparse
from typing import List, Dict, Optional, Any

import requests
import internetarchive

from fatcat_scholar.djvu import djvu_extract_leaf_texts
from fatcat_scholar.issue_db import IssueDB
from fatcat_scholar.schema import (
    DocType,
    IntermediateBundle,
)


def truncate_pub_meta(full: Dict[str, Any]) -> Dict[str, Any]:
    """
    Takes a complete archive.org metadata dictionary for a publication
    collection, and simplifies it by removing fields. Motivation is to make
    intermediate bundle files smaller.
    """
    full.pop("files")
    if "ulrichs" in full and full["ulrichs"]:
        full["ulrichs"][0].pop("reviews_mfl")
        full["ulrichs"][0].pop("editorial_description")

        # these are interesting, but just too long
        full["ulrichs"][0].pop("online_availability_full_text")
        full["ulrichs"][0].pop("abstracting_indexing")
        full["ulrichs"][0].pop("publisher_and_ordering_details")
    return full
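
# A minimal usage sketch (the metadata shape below is hypothetical; real
# archive.org responses carry many more fields):
#
#   meta = {"files": [], "title": "Some Journal",
#           "ulrichs": [{"title": "Some Journal", "reviews_mfl": "..."}]}
#   slim = truncate_pub_meta(meta)
#   assert "files" not in slim and "reviews_mfl" not in slim["ulrichs"][0]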


def truncate_issue_meta(full: Dict[str, Any]) -> Dict[str, Any]:
    """
    Same as truncate_pub_meta() but for issue item metadata
    """
    full.pop("files")
    return full


class SimPipeline:
    def __init__(self, issue_db: IssueDB):
        self.issue_db: IssueDB = issue_db
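        # shared archive.org API session, used for both metadata fetches and
        # file downloads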
        self.ia_client = internetarchive.get_session()

    def fetch_sim_issue(self, issue_item: str, pub_collection: str) -> Optional[Any]:
        """
        issue_item
        pages: str
        page_texts: list
            raw_text
            page_num
            leaf_num
        release_ident: Optional[str]
        pub_item_metadata
        issue_item_metadata
        """
        # fetch full metadata from API
        issue_meta = self.ia_client.get_metadata(issue_item)
        pub_meta = self.ia_client.get_metadata(pub_collection)

        leaf_index = dict()
        leaf_list = []
        if not "page_numbers" in issue_meta:
            # TODO: warn
            return None
        for entry in issue_meta["page_numbers"].get("pages", []):
            page_num = entry["pageNumber"]
            leaf_index[entry["leafNum"]] = page_num
            # only keep leaves with a purely numeric printed page number
            if not (page_num and page_num.isdigit()):
                continue
            leaf_list.append(entry["leafNum"])

        if not leaf_list:
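            # no leaves with numeric page numbers; skip this issue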
            return None

        page_texts: List[Dict[str, Any]] = []
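
        # fetch the DjVu XML fulltext file for this issue item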
        issue_item_obj = self.ia_client.get_item(issue_item)
        issue_item_djvu = issue_item_obj.get_file(issue_item + "_djvu.xml")

        # download() closes the file object when it finishes, so stub out the
        # 'close()' method to keep the in-memory buffer readable afterwards
        djvu_bytes = io.BytesIO()
        djvu_bytes.close = lambda: None  # type: ignore
        assert issue_item_djvu.download(fileobj=djvu_bytes)
        djvu_bytes.seek(0)
        djvu_xml = io.StringIO(djvu_bytes.read().decode("UTF-8"))
        del djvu_bytes

        # mapping of leaf number (int) to raw text extracted from that leaf
        leaf_dict = djvu_extract_leaf_texts(djvu_xml)

        for leaf_num, raw_text in leaf_dict.items():
            page_texts.append(
                dict(
                    page_num=leaf_index.get(leaf_num),
                    leaf_num=leaf_num,
                    raw_text=raw_text,
                )
            )

        return dict(
            issue_item=issue_item,
            pages=None,
            page_texts=page_texts,
            release_ident=None,
            pub_item_metadata=truncate_pub_meta(pub_meta),
            issue_item_metadata=truncate_issue_meta(issue_meta),
        )

    def full_issue_to_pages(self, full_issue: dict) -> List[IntermediateBundle]:
        pages = []
        for leaf in full_issue["page_texts"]:
            bundle = IntermediateBundle(
                doc_type=DocType.sim_page,
                releases=[],
                biblio_release_ident=None,
                grobid_fulltext=None,
                pdftotext_fulltext=None,
                sim_fulltext=dict(
                    issue_item=full_issue["issue_item"],
                    pages=str(leaf["page_num"]),
                    page_texts=[leaf],
                    release_ident=None,
                    pub_item_metadata=full_issue["pub_item_metadata"],
                    issue_item_metadata=full_issue["issue_item_metadata"],
                ),
            )
            pages.append(bundle)
        return pages
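
    # A minimal sketch of how these two methods chain together (the item and
    # collection identifiers below are hypothetical):
    #
    #   pipeline = SimPipeline(issue_db=IssueDB("data/issue_db.sqlite"))
    #   full_issue = pipeline.fetch_sim_issue(
    #       "sim_example-journal_1920-01", "pub_example-journal"
    #   )
    #   if full_issue:
    #       for bundle in pipeline.full_issue_to_pages(full_issue):
    #           print(bundle.json(exclude_none=True))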

    def run_issue_db(self, limit: Optional[int] = None) -> None:
        count = 0
        self.issue_db.db.row_factory = sqlite3.Row
        cur = self.issue_db.db.cursor()
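        # only process issues with a low (< 3) release_count, joined against
        # publication-level metadata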
        for row in cur.execute(
            "SELECT * FROM sim_issue LEFT JOIN sim_pub ON sim_issue.sim_pubid = sim_pub.sim_pubid WHERE sim_issue.release_count < 3"
        ):
            # filter out "contents" and "index" items
            # TODO: more filters; also redundant with IssueDB code?
            if row["issue_item"].endswith("_contents") or row["issue_item"].endswith(
                "_index"
            ):
                continue
            try:
                full_issue = self.fetch_sim_issue(
                    row["issue_item"], row["pub_collection"]
                )
            except requests.exceptions.ConnectionError as e:
                print(str(e), file=sys.stderr)
                continue
            except requests.exceptions.ReadTimeout as e:
                print(str(e), file=sys.stderr)
                continue
            if not full_issue:
                continue
            pages = self.full_issue_to_pages(full_issue)
            for bundle in pages:
                print(bundle.json(exclude_none=True, sort_keys=True))
                count += 1
                if limit is not None and count >= limit:
                    break
            if limit is not None and count >= limit:
                break


def main() -> None:
    """
    Run this command like:

        python -m fatcat_scholar.sim_pipeline
    """

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    subparsers = parser.add_subparsers()

    parser.add_argument(
        "--issue-db-file",
        help="sqlite3 database file to open",
        default="data/issue_db.sqlite",
        type=str,
    )

    sub = subparsers.add_parser("run_issue_db", help="iterates through entire IssueDB")
    sub.set_defaults(func="run_issue_db")
    sub.add_argument("--limit", help="maximum number of pages to index", type=int)

    args = parser.parse_args()
    if not args.__dict__.get("func"):
        parser.print_help(file=sys.stderr)
        sys.exit(-1)

    sp = SimPipeline(issue_db=IssueDB(args.issue_db_file))

    if args.func == "run_issue_db":
        sp.run_issue_db(limit=args.limit)
    else:
        func = getattr(sp, args.func)
        func()


if __name__ == "__main__":
    main()