From 9beb322cfd68d8db73a132ac57f963aef11c8501 Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Mon, 11 Mar 2019 16:08:26 -0700
Subject: basic JALC XML DOI metadata parser

---
 python/parse_jalc_xml.py               | 209 +++++++++++++++++++++++++++++++++
 python/tests/files/jalc_lod_sample.xml | 176 +++++++++++++++++++++++++++
 2 files changed, 385 insertions(+)
 create mode 100644 python/parse_jalc_xml.py
 create mode 100644 python/tests/files/jalc_lod_sample.xml

diff --git a/python/parse_jalc_xml.py b/python/parse_jalc_xml.py
new file mode 100644
index 00000000..7df79421
--- /dev/null
+++ b/python/parse_jalc_xml.py
@@ -0,0 +1,209 @@
+
+import sys
+import json
+import datetime
+import unicodedata
+from bs4 import BeautifulSoup
+from bs4.element import NavigableString
+
+
+DATE_FMT = "%Y-%m-%d"
+
+def is_cjk(s):
+    if not s:
+        return False
+    return unicodedata.name(s[0]).startswith("CJK")
+
+class JalcXmlParser():
+    """
+    Converts JALC DOI metadata (in XML/RDF format) to fatcat release entity
+
+    NOTE: some JALC DOIs seem to get cross-registered with Crossref
+    """
+
+    def __init__(self):
+        pass
+
+    def parse_file(self, handle):
+
+        # 1. open with beautiful soup
+        soup = BeautifulSoup(handle, "xml")
+
+        # 2. iterate over articles, call parse_record on each
+        for record in soup.find_all("Description"):
+            resp = self.parse_record(record)
+            print(json.dumps(resp))
+            #sys.exit(-1)
+
+
+    def parse_record(self, record):
+        """
+        In JALC metadata, both English and Japanese records are given for most
+        fields.
+        """
+
+        #extra = dict()
+        #extra_jalc = dict()
+
+        titles = record.find_all("title")
+        title = titles[0].string.strip()
+        original_title = None
+        if title.endswith('.'):
+            title = title[:-1]
+        if len(titles) > 1:
+            original_title = titles[1].string.strip()
+            if original_title.endswith('.'):
+                original_title = original_title[:-1]
+
+        doi = None
+        if record.doi:
+            doi = record.doi.string.lower().strip()
+            assert doi.startswith('10.')
+
+        contribs = []
+        people = record.find_all("Person")
+        if people and (len(people) % 2 == 0) and is_cjk(people[1].find('name').string):
+            # both english and japanese names are included
+            for i in range(int(len(people)/2)):
+                # both english and japanese names are included for every author
+                eng = people[i*2]
+                jpn = people[i*2 + 1]
+                raw_name = eng.find('name')
+                orig_name = jpn.find('name')
+                if not raw_name:
+                    raw_name = orig_name
+                contrib = dict(
+                    raw_name=raw_name.string,
+                    role='author',
+                )
+                if raw_name and orig_name:
+                    contrib['extra'] = dict(original_name=orig_name.string)
+                contribs.append(contrib)
+        elif people:
+            for eng in people:
+                raw_name = eng.find('name')
+                contrib = dict(
+                    raw_name=raw_name.string,
+                    role='author',
+                )
+                contribs.append(contrib)
+
+        release_year = None
+        release_date = None
+        date = record.date or None
+        if date:
+            date = date.string
+            if len(date) == 10:
+                release_date = datetime.datetime.strptime(date, DATE_FMT).date()
+                release_year = release_date.year
+                release_date = release_date.isoformat()
+            elif len(date) == 4:
+                release_year = int(date)
+
+        pages = None
+        if record.startingPage:
+            pages = record.startingPage.string
+            if record.endingPage:
+                pages = "{}-{}".format(pages, record.endingPage.string)
+        volume = None
+        if record.volume:
+            volume = record.volume.string
+        issue = None
+        if record.number:
+            # note: number/issue transform
+            issue = record.number.string
+
+        issn = None
+        issn_list = record.find_all("issn")
+        if issn_list:
+            # if we wanted the other ISSNs, would also need to uniq the list.
+            # But we only need one to lookup ISSN-L/container
+            issn = issn_list[0].string
+
+        container = dict()
+        container_extra = dict()
+        container_name = None
+        if record.publicationName:
+            pubs = [p.string.strip() for p in record.find_all("publicationName")]
+            pubs = [p for p in pubs if p]
+            assert(pubs)
+            if len(pubs) > 1 and pubs[0] == pubs[1]:
+                pubs = [pubs[0]]
+            elif len(pubs) > 1 and is_cjk(pubs[0]):
+                # ordering is not reliable
+                pubs = [pubs[1], pubs[0]]
+            container_name = pubs[0]
+            container['name'] = container_name
+            if len(pubs) > 1:
+                orig_container_name = pubs[1]
+                container_extra['original_name'] = pubs[1]
+        publisher = None
+        if record.publisher:
+            pubs = [p.string.strip() for p in record.find_all("publisher")]
+            pubs = [p for p in pubs if p]
+            if len(pubs) > 1 and pubs[0] == pubs[1]:
+                pubs = [pubs[0]]
+            elif len(pubs) > 1 and is_cjk(pubs[0]):
+                # ordering is not reliable
+                pubs = [pubs[1], pubs[0]]
+            publisher = pubs[0]
+            container['publisher'] = publisher
+            if len(pubs) > 1:
+                container_extra['publisher_alt_name'] = pubs[1]
+        if container_extra:
+            container['extra'] = container_extra
+        if not container:
+            container = None
+
+        # the vast majority of works are in japanese
+        # TODO: any indication when *not* in japanese?
+        lang = "ja"
+
+        # reasonable default for this collection
+        release_type = "article-journal"
+
+        re = dict(
+            work_id=None,
+            title=title,
+            original_title=original_title,
+            release_type=release_type,
+            release_status='submitted',  # XXX: source_type?
+            release_date=release_date,
+            release_year=release_year,
+            #arxiv_id
+            doi=doi,
+            #pmid
+            #pmcid
+            #isbn13     # never in Article
+            volume=volume,
+            issue=issue,
+            pages=pages,
+            publisher=publisher,
+            language=lang,
+            #license_slug   # not in MEDLINE
+
+            # content, mimetype, lang
+            #abstracts=abstracts,
+
+            # raw_name, role, raw_affiliation, extra
+            contribs=contribs,
+
+            # name, type, publisher, issnl
+            #  extra: issnp, issne, original_name, languages, country
+            container=container,
+
+            # extra:
+            #   withdrawn_date
+            #   translation_of
+            #   subtitle
+            #   aliases
+            #   container_name
+            #   group-title
+            #   pubmed: retraction refs
+            #extra=extra,
+        )
+        return re
+
+if __name__=='__main__':
+    parser = JalcXmlParser()
+    parser.parse_file(open(sys.argv[1]))
diff --git a/python/tests/files/jalc_lod_sample.xml b/python/tests/files/jalc_lod_sample.xml
new file mode 100644
index 00000000..3a9dd770
--- /dev/null
+++ b/python/tests/files/jalc_lod_sample.xml
@@ -0,0 +1,176 @@
+
+
+10.2497/jjspm.36.898
+New carbides in the Ni-Ti-Mo-C system.
+Ni-Ti-Mo-C系に出現する新炭化物相について
+
+
+Hashimoto Yasuhiko
+Hashimoto
+Yasuhiko
+
+
+
+
+橋本 雍彦
+橋本
+雍彦
+
+
+
+
+Koyama Koichiro
+Koyama
+Koichiro
+
+
+
+
+香山 滉一郎
+香山
+滉一郎
+
+
+
+
+Suzuki Kenji
+Suzuki
+Kenji
+
+
+
+
+鈴木 建次
+鈴木
+建次
+
+
+
+
+Takahashi Teruo
+Takahashi
+Teruo
+
+
+
+
+高橋 輝男
+高橋
+輝男
+
+
+Hashimoto Yasuhiko
+橋本 雍彦
+Koyama Koichiro
+香山 滉一郎
+Suzuki Kenji
+鈴木 建次
+Takahashi Teruo
+高橋 輝男
+Japan Society of Powder and Powder Metallurgy
+一般社団法人 粉体粉末冶金協会
+1989
+36
+8
+898
+902
+0532-8799
+0532-8799
+1880-9014
+1880-9014
+
+Journal of the Japan Society of Powder and Powder Metallurgy
+
+粉体および粉末冶金
+
+
+10.2497/jjspm.36.903
+
+Effects of grain size on cutting performance of Al2O3-TiC ceramics tool.
+
+Al2O3-TiCセラミックス工具の切削性能におよぼすセラミックス粒度の影響
+
+
+
+Katsumura Yuji
+Katsumura
+Yuji
+
+
+
+
+勝村 祐次
+勝村
+祐次
+
+
+
+
+Sobata Kaoru
+Sobata
+Kaoru
+
+
+
+
+蕎麦田 薫
+蕎麦田
+
+
+
+
+
+Uehara Yoshito
+Uehara
+Yoshito
+
+
+
+
+上原 好人
+上原
+好人
+
+
+
+
+Suzuki Hisashi
+Suzuki
+Hisashi
+
+
+
+
+鈴木 寿
+鈴木
+寿
+
+
+Katsumura Yuji
+勝村祐次
+Sobata Kaoru
+蕎麦田 薫
+Uehara Yoshito
+上原好人
+Suzuki Hisashi
+鈴木寿
+Japan Society of Powder and Powder Metallurgy
+一般社団法人 粉体粉末冶金協会
+1989
+36
+8
+903
+907
+0532-8799
+0532-8799
+1880-9014
+1880-9014
+
+Journal of the Japan Society of Powder and Powder Metallurgy
+
+粉体および粉末冶金
+
+
--
cgit v1.2.3
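A minimal pytest-style sketch (not part of the patch above) of how the bundled sample file could be exercised against the new parser. The import path, the fixture path relative to the python/ directory, and the asserted values are assumptions drawn from the code and sample data in this commit, not an existing test.

from bs4 import BeautifulSoup

from parse_jalc_xml import JalcXmlParser


def test_parse_jalc_lod_sample():
    # parse each "Description" record individually, mirroring parse_file()
    parser = JalcXmlParser()
    with open('tests/files/jalc_lod_sample.xml', 'rb') as f:
        soup = BeautifulSoup(f, "xml")
    records = [parser.parse_record(d) for d in soup.find_all("Description")]

    # the sample file contains two journal article records
    assert len(records) == 2

    first = records[0]
    assert first['doi'] == '10.2497/jjspm.36.898'
    assert first['release_type'] == 'article-journal'
    # four author pairs (English + Japanese) are expected in the first record
    assert len(first['contribs']) == 4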