From 114c6b611148d2ff499bcea302eee0eca00df647 Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Fri, 6 Apr 2018 12:39:49 -0700
Subject: small grobid2json test

---
 mapreduce/extraction_cdx_grobid.py |   1 +
 mapreduce/grobid2json.py           |  12 +++-
 mapreduce/tests/files/small.json   |  43 +++++++++++++++
 mapreduce/tests/files/small.xml    | 110 +++++++++++++++++++++++++++++++++++++
 4 files changed, 164 insertions(+), 2 deletions(-)
 create mode 100644 mapreduce/tests/files/small.json
 create mode 100644 mapreduce/tests/files/small.xml

diff --git a/mapreduce/extraction_cdx_grobid.py b/mapreduce/extraction_cdx_grobid.py
index a4a13f8..63f290a 100755
--- a/mapreduce/extraction_cdx_grobid.py
+++ b/mapreduce/extraction_cdx_grobid.py
@@ -61,6 +61,7 @@ class MRExtractCdxGrobid(MRJob):
         r = requests.post(self.options.grobid_uri + "/api/processFulltextDocument",
             files={'input': content})
         if r.status_code is not 200:
+            # if invalid file, get a 400 with JSON body with 'description' key (and others)
             # XXX:
             return None
         return r
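The comment added above records GROBID's failure mode: an invalid file comes back as
HTTP 400 with a JSON body carrying a 'description' key (among others). As a rough
sketch of what the XXX branch could eventually do with that body -- the helper name
and the error-dict shape here are assumptions, not part of this patch:

    import requests

    def process_fulltext(grobid_uri, content):
        # Same POST as in extraction_cdx_grobid.py above.
        r = requests.post(grobid_uri + "/api/processFulltextDocument",
                          files={'input': content})
        if r.status_code != 200:
            # Per the comment above: a 400 response carries a JSON body with a
            # 'description' field explaining why the file was rejected.
            try:
                description = r.json().get('description')
            except ValueError:
                description = None
            return {'error': description or 'GROBID returned HTTP {}'.format(r.status_code)}
        return r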
diff --git a/mapreduce/grobid2json.py b/mapreduce/grobid2json.py
index cc6eb2c..52a3125 100755
--- a/mapreduce/grobid2json.py
+++ b/mapreduce/grobid2json.py
@@ -1,6 +1,10 @@
 #!/usr/bin/env python3
 """
 
+NB: adapted to work as a library for PDF extraction. Will probably be
+re-written eventually to be correct, complete, and robust; this is just a
+first iteration.
+
 This script tries to extract everything from a GROBID TEI XML fulltext dump:
 
 - header metadata
@@ -38,6 +42,8 @@ def journal_info(elem):
     journal = dict()
     journal['name'] = elem.findtext('.//{%s}monogr/{%s}title' % (ns, ns))
     journal['publisher'] = elem.findtext('.//{%s}publicationStmt/{%s}publisher' % (ns, ns))
+    if journal['publisher'] == '':
+        journal['publisher'] = None
     journal['issn'] = elem.findtext('.//{%s}idno[@type="ISSN"]' % ns)
     journal['eissn'] = elem.findtext('.//{%s}idno[@type="eISSN"]' % ns)
     journal['volume'] = elem.findtext('.//{%s}biblScope[@unit="volume"]' % ns)
@@ -59,6 +65,8 @@ def biblio_info(elem):
     ref['title'] = other_title
     ref['authors'] = all_authors(elem)
     ref['publisher'] = elem.findtext('.//{%s}publicationStmt/{%s}publisher' % (ns, ns))
+    if ref['publisher'] == '':
+        ref['publisher'] = None
     date = elem.find('.//{%s}date[@type="published"]' % ns)
     ref['date'] = (date != None) and date.attrib.get('when')
     ref['volume'] = elem.findtext('.//{%s}biblScope[@unit="volume"]' % ns)
@@ -74,7 +82,7 @@
     return ref
 
 
-def do_tei(content, encumbered=True):
+def teixml2json(content, encumbered=True):
 
     if type(content) == str:
         content = io.StringIO(content)
@@ -131,7 +139,7 @@ def main():   # pragma no cover
     for filename in args.teifiles:
         content = open(filename, 'r')
         print(json.dumps(
-            do_tei(content,
+            teixml2json(content,
                 encumbered=(not args.no_encumbered))))
 
 if __name__=='__main__':   # pragma no cover
diff --git a/mapreduce/tests/files/small.json b/mapreduce/tests/files/small.json
new file mode 100644
index 0000000..208fb49
--- /dev/null
+++ b/mapreduce/tests/files/small.json
@@ -0,0 +1,43 @@
+{
+    "title": "Dummy Example File",
+    "authors": [
+        {"name": "Brewster Kahle"},
+        {"name": "J Doe"}
+    ],
+    "journal": {
+        "name": "Dummy Example File. Journal of Fake News. pp. 1-2. ISSN 1234-5678",
+        "eissn": null,
+        "issn": null,
+        "issue": null,
+        "publisher": null,
+        "volume": null
+    },
+    "date": "2000",
+    "doi": null,
+    "citations": [
+        { "authors": [{"name": "A Seaperson"}],
+          "date": "2001",
+          "id": "b0",
+          "index": 0,
+          "issue": null,
+          "journal": "Letters in the Alphabet",
+          "publisher": null,
+          "title": "Everything is Wonderful",
+          "url": null,
+          "volume": "20"},
+        { "authors": [],
+          "date": "2011-03-28",
+          "id": "b1",
+          "index": 1,
+          "issue": null,
+          "journal": "The Dictionary",
+          "publisher": null,
+          "title": "All about Facts",
+          "url": null,
+          "volume": "14"}
+    ],
+    "abstract": "Everything you ever wanted to know about nothing",
+    "body": "Introduction \nEverything starts somewhere, as somebody [1] once said. \n\n In Depth \n Meat \nYou know, for kids. \n Potatos \nQED.",
+    "acknowledgement": null,
+    "annex": null
+}
diff --git a/mapreduce/tests/files/small.xml b/mapreduce/tests/files/small.xml
new file mode 100644
index 0000000..78b9ba2
--- /dev/null
+++ b/mapreduce/tests/files/small.xml
@@ -0,0 +1,110 @@
+
+GROBID - A machine learning software for extracting information from scholarly documents
+
+Dummy Example File
+
+2000
+
+Brewster Kahle
+J Doe
+
+Internet Archive
+
+Dummy Example File
+
+Dummy Example File. Journal of Fake News. pp. 1-2. ISSN 1234-5678
+
+2000
+
+Fake Data
+
+Everything you ever wanted to know about nothing
+
+Introduction
+Everything starts somewhere, as somebody[1] once said.
+
+In Depth
+
+Meat
+You know, for kids.
+
+Potatos
+QED.
+
+Everything is Wonderful
+A Seaperson
+Letters in the Alphabet
+20
+
+All about Facts
+The Dictionary
+14
+None
+
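The two fixtures above go together: small.xml is a hand-trimmed GROBID TEI document
and small.json is the output that teixml2json() should produce from it. A minimal
sketch of a test exercising the pair -- the import path, file paths, and test name
are assumptions, and the test itself is not part of this commit:

    import json

    from grobid2json import teixml2json

    def test_teixml2json_small():
        # Run the TEI fixture through the converter and compare it,
        # field for field, against the expected JSON fixture.
        with open('mapreduce/tests/files/small.xml', 'r') as f:
            extracted = teixml2json(f.read())
        with open('mapreduce/tests/files/small.json', 'r') as f:
            expected = json.load(f)
        assert extracted == expected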
--
cgit v1.2.3